diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index d63e1d45b5..cdb42845ca 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -76,7 +76,7 @@ type ChannelBuilder struct { outputBytes int } -// newChannelBuilder creates a new channel builder or returns an error if the +// NewChannelBuilder creates a new channel builder or returns an error if the // channel out could not be created. // it acts as a factory for either a span or singular channel out func NewChannelBuilder(cfg ChannelConfig, rollupCfg rollup.Config, latestL1OriginBlockNum uint64) (*ChannelBuilder, error) { @@ -156,7 +156,7 @@ func (c *ChannelBuilder) AddBlock(block *types.Block) (*derive.L1BlockInfo, erro return l1info, fmt.Errorf("converting block to batch: %w", err) } - if err = c.co.AddSingularBatch(batch, l1info.SequenceNumber); errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.ErrCompressorFull) { + if err = c.co.AddSingularBatch(&c.rollupCfg, batch, l1info.SequenceNumber); errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.ErrCompressorFull) { c.setFullErr(err) return l1info, c.FullErr() } else if err != nil { diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index acbdfd55ae..885a60f8f1 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -48,6 +48,8 @@ type channelManager struct { // if set to true, prevents production of any new channel frames closed bool + + isVolta bool } func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rollupCfg *rollup.Config) *channelManager { @@ -261,6 +263,14 @@ func (s *channelManager) processBlocks() error { latestL2ref eth.L2BlockRef ) for i, block := range s.blocks { + if !s.isVolta && s.rollupCfg.IsVolta(block.Time()) && s.currentChannel.InputBytes() != 0 { + // the current channel is before volta fork. + s.currentChannel.Close() + s.isVolta = true + log.Info("before volta fork channel", "channel_id", s.currentChannel.ID(), "block_time", block.Time()) + break + } + l1info, err := s.currentChannel.AddBlock(block) if errors.As(err, &_chFullErr) { // current block didn't get added because channel is already full @@ -298,6 +308,7 @@ func (s *channelManager) processBlocks() error { "channel_full", s.currentChannel.IsFull(), "input_bytes", s.currentChannel.InputBytes(), "ready_bytes", s.currentChannel.ReadyBytes(), + "is_volta", s.isVolta, ) return nil } @@ -354,6 +365,13 @@ func (s *channelManager) AddL2Block(block *types.Block) error { return ErrReorg } + if s.tip == (common.Hash{}) && s.rollupCfg.IsVolta(block.Time()) { + // set volta flag at startup + s.isVolta = true + log.Info("succeed to set is volta flag", "block_time", block.Time(), + "l2 block num", block.Number()) + } + s.metr.RecordL2BlockInPendingQueue(block) s.blocks = append(s.blocks, block) s.tip = block.Hash() @@ -362,11 +380,20 @@ func (s *channelManager) AddL2Block(block *types.Block) error { } func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo) eth.L2BlockRef { + milliPart := uint64(0) + if block.MixDigest() != (common.Hash{}) { + // adapts l2 millisecond, highest 2 bytes as milli-part. 
+ milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) + } + + log.Debug("generate l2 block ref:", "milli-timestamp", milliPart, + "seconds-timestamp", block.Time(), "l2 block number", block.Number()) return eth.L2BlockRef{ Hash: block.Hash(), Number: block.NumberU64(), ParentHash: block.ParentHash(), Time: block.Time(), + MilliTime: milliPart, L1Origin: eth.BlockID{Hash: l1info.BlockHash, Number: l1info.Number}, SequenceNumber: l1info.SequenceNumber, } diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index e8234d978a..0242c7587e 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -305,6 +305,31 @@ type DeployConfig struct { UseInterop bool `json:"useInterop,omitempty"` } +func (d *DeployConfig) L1MillisecondBlockInterval() uint64 { + // convert second to millisecond + return d.L1BlockTime * 1000 +} + +func (d *DeployConfig) L2MillisecondBlockInterval() uint64 { + if d.L2BlockTime > 3 { + // has been millisecond + return d.L2BlockTime + } + // convert second to millisecond + return d.L2BlockTime * 1000 +} + +// L2SecondBlockInterval is just used by ut&e2e test. +// TODO: ut&e2e need to be refined later. +func (d *DeployConfig) L2SecondBlockInterval() uint64 { + if d.L2BlockTime <= 3 { + // has been second + return d.L2BlockTime + } + // convert millisecond to second + return d.L2BlockTime / 1000 +} + // Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy // which makes it easier to maintain, we do not need efficiency in this case. func (d *DeployConfig) Copy() *DeployConfig { @@ -434,9 +459,15 @@ func (d *DeployConfig) Check() error { return fmt.Errorf("%w: GovernanceToken owner cannot be address(0)", ErrInvalidDeployConfig) } } + if d.L2BlockTime <= 3 { + // TODO: too many tests depend it, tmp work around it + // convert ms l2 time interval + d.L2BlockTime = d.L2BlockTime * 1000 + } + // L2 block time must always be smaller than L1 block time - if d.L1BlockTime < d.L2BlockTime { - return fmt.Errorf("L2 block time (%d) is larger than L1 block time (%d)", d.L2BlockTime, d.L1BlockTime) + if d.L1MillisecondBlockInterval() < d.L2MillisecondBlockInterval() { + return fmt.Errorf("L2 block interval ms (%d) is larger than L1 block interval ms (%d)", d.L2MillisecondBlockInterval(), d.L1MillisecondBlockInterval()) } if d.RequiredProtocolVersion == (params.ProtocolVersion{}) { log.Warn("RequiredProtocolVersion is empty") @@ -585,6 +616,7 @@ func (d *DeployConfig) DeltaTime(genesisTime uint64) *uint64 { return &v } +// TODO judge if it is need to use milliseconds timestamp with the fork information func (d *DeployConfig) EcotoneTime(genesisTime uint64) *uint64 { if d.L2GenesisEcotoneTimeOffset == nil { return nil @@ -688,7 +720,7 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas GasLimit: uint64(d.L2GenesisBlockGasLimit), }, }, - BlockTime: d.L2BlockTime, + //BlockTime: d.L2BlockTime, MaxSequencerDrift: d.MaxSequencerDrift, SeqWindowSize: d.SequencerWindowSize, ChannelTimeout: d.ChannelTimeout, diff --git a/op-e2e/actions/dencun_fork_test.go b/op-e2e/actions/dencun_fork_test.go index 5e0f1706cb..ead5a098ac 100644 --- a/op-e2e/actions/dencun_fork_test.go +++ b/op-e2e/actions/dencun_fork_test.go @@ -124,7 +124,7 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) { cancunOffset := hexutil.Uint64(0) dp.DeployConfig.L1CancunTimeOffset = &cancunOffset // This test wil fork on the second block - offset := 
hexutil.Uint64(dp.DeployConfig.L2BlockTime * 2) + offset := hexutil.Uint64(dp.DeployConfig.L2SecondBlockInterval() * 2) dp.DeployConfig.L2GenesisCanyonTimeOffset = &offset dp.DeployConfig.L2GenesisDeltaTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset diff --git a/op-e2e/actions/l2_sequencer_test.go b/op-e2e/actions/l2_sequencer_test.go index 05fa0242d1..dc38421fc4 100644 --- a/op-e2e/actions/l2_sequencer_test.go +++ b/op-e2e/actions/l2_sequencer_test.go @@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { origin := miner.l1Chain.CurrentBlock() // L2 makes blocks to catch up - for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time { + for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.SecondBlockInterval() < origin.Time { makeL2BlockWithAliceTx() require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches") } @@ -111,7 +111,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { sequencer.ActL1HeadSignal(t) // Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin - for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) { + for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.SecondBlockInterval() <= origin.Time+sd.ChainSpec.MaxSequencerDrift(origin.Time) { sequencer.ActL2KeepL1Origin(t) makeL2BlockWithAliceTx() require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin") diff --git a/op-e2e/actions/user_test.go b/op-e2e/actions/user_test.go index c9692c91f0..53b227777a 100644 --- a/op-e2e/actions/user_test.go +++ b/op-e2e/actions/user_test.go @@ -119,10 +119,10 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { dp.DeployConfig.L2GenesisFjordTimeOffset = test.fjordTime if test.canyonTime != nil { - require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime), "canyon fork must be aligned") + require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2SecondBlockInterval()), "canyon fork must be aligned") } if test.ecotoneTime != nil { - require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime), "ecotone fork must be aligned") + require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2SecondBlockInterval()), "ecotone fork must be aligned") } sd := e2eutils.Setup(t, dp, defaultAlloc) diff --git a/op-e2e/e2eutils/setup.go b/op-e2e/e2eutils/setup.go index a86f28c5da..7f42ed56de 100644 --- a/op-e2e/e2eutils/setup.go +++ b/op-e2e/e2eutils/setup.go @@ -187,6 +187,11 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * PlasmaConfig: pcfg, } + if rollupCfg.BlockTime <= 3 { + // covert to ms timestamp + rollupCfg.BlockTime = rollupCfg.BlockTime * 1000 + } + require.NoError(t, rollupCfg.Check()) // Sanity check that the config is correct diff --git a/op-e2e/op_geth.go b/op-e2e/op_geth.go index 7cea17d43a..d356068cf2 100644 --- a/op-e2e/op_geth.go +++ b/op-e2e/op_geth.go @@ -211,8 +211,8 @@ func (d *OpGeth) StartBlockBuilding(ctx context.Context, attrs *eth.PayloadAttri // CreatePayloadAttributes creates a valid PayloadAttributes containing a L1Info deposit transaction followed by the supplied transactions. 
func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.PayloadAttributes, error) { - timestamp := d.L2Head.Timestamp + 2 - l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp)) + milliTimestamp := d.L2Head.MillisecondTimestamp() + 2*1000 // 2000 millisecond block interval + l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, milliTimestamp) if err != nil { return nil, err } @@ -228,17 +228,17 @@ func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.Payloa } var withdrawals *types.Withdrawals - if d.L2ChainConfig.IsCanyon(uint64(timestamp)) { + if d.L2ChainConfig.IsCanyon(milliTimestamp / 1000) { withdrawals = &types.Withdrawals{} } var parentBeaconBlockRoot *common.Hash - if d.L2ChainConfig.IsEcotone(uint64(timestamp)) { + if d.L2ChainConfig.IsEcotone(milliTimestamp / 1000) { parentBeaconBlockRoot = d.L1Head.ParentBeaconRoot() } attrs := eth.PayloadAttributes{ - Timestamp: timestamp, + Timestamp: eth.Uint64Quantity(milliTimestamp / 1000), Transactions: txBytes, NoTxPool: true, GasLimit: (*eth.Uint64Quantity)(&d.SystemConfig.GasLimit), diff --git a/op-e2e/setup.go b/op-e2e/setup.go index b0c969639a..3d8659bbac 100644 --- a/op-e2e/setup.go +++ b/op-e2e/setup.go @@ -547,6 +547,11 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste return nil, err } sys.RollupConfig = &defaultConfig + if sys.RollupConfig.BlockTime <= 3 { + // TODO: too many tests depend it, tmp work around it + // covert ms timestamp + sys.RollupConfig.BlockTime = sys.RollupConfig.BlockTime * 1000 + } // Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2 bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LevelInfo).New("role", "l1_cl"), diff --git a/op-e2e/system_adminrpc_test.go b/op-e2e/system_adminrpc_test.go index e7c0af673b..e29e9c7606 100644 --- a/op-e2e/system_adminrpc_test.go +++ b/op-e2e/system_adminrpc_test.go @@ -56,7 +56,7 @@ func TestStopStartSequencer(t *testing.T) { require.False(t, active, "sequencer should be inactive") blockBefore := latestBlock(t, l2Seq) - time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime+1) * time.Second) + time.Sleep(time.Duration(cfg.DeployConfig.L2SecondBlockInterval()+1) * time.Second) blockAfter := latestBlock(t, l2Seq) require.Equal(t, blockAfter, blockBefore, "Chain advanced after stopping sequencer") diff --git a/op-node/chaincfg/chains.go b/op-node/chaincfg/chains.go index d7256bad94..7eebf2c304 100644 --- a/op-node/chaincfg/chains.go +++ b/op-node/chaincfg/chains.go @@ -127,7 +127,7 @@ var OPBNBMainnet = rollup.Config{ GasLimit: 100000000, }, }, - BlockTime: 1, + //BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, @@ -163,7 +163,7 @@ var OPBNBTestnet = rollup.Config{ GasLimit: 100000000, }, }, - BlockTime: 1, + //BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, @@ -199,7 +199,7 @@ var OPBNBQANet = rollup.Config{ GasLimit: 100000000, }, }, - BlockTime: 1, + //BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, diff --git a/op-node/p2p/app_params.go b/op-node/p2p/app_params.go index e8bea84725..695b006744 100644 --- a/op-node/p2p/app_params.go +++ b/op-node/p2p/app_params.go @@ -24,7 +24,7 @@ type ApplicationScoreParams struct { } func LightApplicationScoreParams(cfg *rollup.Config) ApplicationScoreParams { - slot := time.Duration(cfg.BlockTime) 
* time.Second + slot := time.Duration(rollup.VoltBlockTime) * time.Millisecond if slot == 0 { slot = 2 * time.Second } diff --git a/op-node/p2p/peer_params.go b/op-node/p2p/peer_params.go index 6a60e2160b..6c76025b26 100644 --- a/op-node/p2p/peer_params.go +++ b/op-node/p2p/peer_params.go @@ -33,7 +33,7 @@ func ScoreDecay(duration time.Duration, slot time.Duration) float64 { // // [PeerScoreParams]: https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub@v0.8.1#PeerScoreParams func LightPeerScoreParams(cfg *rollup.Config) pubsub.PeerScoreParams { - slot := time.Duration(cfg.BlockTime) * time.Second + slot := time.Duration(rollup.VoltBlockTime) * time.Millisecond if slot == 0 { slot = 2 * time.Second } diff --git a/op-node/p2p/sync.go b/op-node/p2p/sync.go index c46eb6b365..58569f326c 100644 --- a/op-node/p2p/sync.go +++ b/op-node/p2p/sync.go @@ -683,7 +683,7 @@ func (s *SyncClient) doRequest(ctx context.Context, id peer.ID, expectedBlockNum } version := binary.LittleEndian.Uint32(versionData[:]) - isCanyon := s.cfg.IsCanyon(s.cfg.TimestampForBlock(expectedBlockNum)) + isCanyon := s.cfg.IsCanyon(s.cfg.MillisecondTimestampForBlock(expectedBlockNum) / 1000) envelope, err := readExecutionPayload(version, data, isCanyon) if err != nil { return err @@ -878,7 +878,7 @@ func (srv *ReqRespServer) handleSyncRequest(ctx context.Context, stream network. if req < srv.cfg.Genesis.L2.Number { return req, fmt.Errorf("cannot serve request for L2 block %d before genesis %d: %w", req, srv.cfg.Genesis.L2.Number, invalidRequestErr) } - max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().Unix())) + max, err := srv.cfg.TargetBlockNumber(uint64(time.Now().UnixMilli())) if err != nil { return req, fmt.Errorf("cannot determine max target block number to verify request: %w", invalidRequestErr) } diff --git a/op-node/rollup/chain_spec.go b/op-node/rollup/chain_spec.go index f11f8c78b1..b43a5dc913 100644 --- a/op-node/rollup/chain_spec.go +++ b/op-node/rollup/chain_spec.go @@ -42,6 +42,7 @@ const ( Ecotone ForkName = "ecotone" Fjord ForkName = "fjord" Interop ForkName = "interop" + Volta ForkName = "volta" None ForkName = "none" ) @@ -52,7 +53,8 @@ var nextFork = map[ForkName]ForkName{ Delta: Ecotone, Ecotone: Fjord, Fjord: Interop, - Interop: None, + Interop: Volta, + Volta: None, } type ChainSpec struct { @@ -134,6 +136,9 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { if s.config.IsInterop(block.Time) { s.currentFork = Interop } + if s.config.IsVolta(block.Time) { + s.currentFork = Volta + } log.Info("Current hardfork version detected", "forkName", s.currentFork) return } @@ -153,6 +158,8 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { foundActivationBlock = s.config.IsFjordActivationBlock(block.Time) case Interop: foundActivationBlock = s.config.IsInteropActivationBlock(block.Time) + case Volta: + foundActivationBlock = s.config.IsVoltaActivationBlock(block.MillisecondTimestamp()) } if foundActivationBlock { diff --git a/op-node/rollup/chain_spec_test.go b/op-node/rollup/chain_spec_test.go index b6547835cd..b006bdf704 100644 --- a/op-node/rollup/chain_spec_test.go +++ b/op-node/rollup/chain_spec_test.go @@ -33,7 +33,7 @@ var testConfig = Config{ GasLimit: 30_000_000, }, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 3600, ChannelTimeout: 300, diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 933a946c13..50a82ba804 100644 --- a/op-node/rollup/derive/attributes.go +++ 
b/op-node/rollup/derive/attributes.go @@ -5,14 +5,14 @@ import ( "fmt" "math/big" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/bsc" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/predeploys" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" ) var ( @@ -85,6 +85,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex return nil, NewCriticalError(fmt.Errorf("failed to derive some deposits: %w", err)) } // apply sysCfg changes + // TODO: may need to pass l1origin milli-timestamp later if IsEcotone() use the milli-timestamp if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, ba.rollupCfg, info.Time()); err != nil { return nil, NewCriticalError(fmt.Errorf("failed to apply derived L1 sysCfg updates: %w", err)) } @@ -107,7 +108,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex // Calculate bsc block base fee var l1BaseFee *big.Int - if ba.rollupCfg.IsSnow(l2Parent.Time + ba.rollupCfg.BlockTime) { + if ba.rollupCfg.IsSnow(ba.rollupCfg.NextSecondBlockTime(l2Parent.MillisecondTimestamp())) { l1BaseFee, err = SnowL1GasPrice(ctx, ba, epoch) if err != nil { return nil, err @@ -124,21 +125,21 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex l1Info = bsc.NewBlockInfoBSCWrapper(l1Info, l1BaseFee) // Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2 - nextL2Time := l2Parent.Time + ba.rollupCfg.BlockTime - if nextL2Time < l1Info.Time() { + nextL2MilliTime := ba.rollupCfg.NextMillisecondBlockTime(l2Parent.MillisecondTimestamp()) + if nextL2MilliTime < l1Info.MillisecondTimestamp() { return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d", - l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time())) + l2Parent, nextL2MilliTime, eth.ToBlockID(l1Info), l1Info.MillisecondTimestamp())) } var upgradeTxs []hexutil.Bytes - if ba.rollupCfg.IsEcotoneActivationBlock(nextL2Time) { + if ba.rollupCfg.IsEcotoneActivationBlock(nextL2MilliTime / 1000) { upgradeTxs, err = EcotoneNetworkUpgradeTransactions() if err != nil { return nil, NewCriticalError(fmt.Errorf("failed to build ecotone network upgrade txs: %w", err)) } } - if ba.rollupCfg.IsFjordActivationBlock(nextL2Time) { + if ba.rollupCfg.IsFjordActivationBlock(nextL2MilliTime / 1000) { fjord, err := FjordNetworkUpgradeTransactions() if err != nil { return nil, NewCriticalError(fmt.Errorf("failed to build fjord network upgrade txs: %w", err)) @@ -146,7 +147,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex upgradeTxs = append(upgradeTxs, fjord...) } - l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time) + l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2MilliTime) if err != nil { return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err)) } @@ -157,20 +158,19 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex txs = append(txs, upgradeTxs...) 
var withdrawals *types.Withdrawals - if ba.rollupCfg.IsCanyon(nextL2Time) { + if ba.rollupCfg.IsCanyon(nextL2MilliTime / 1000) { withdrawals = &types.Withdrawals{} } var parentBeaconRoot *common.Hash - if ba.rollupCfg.IsEcotone(nextL2Time) { + if ba.rollupCfg.IsEcotone(nextL2MilliTime / 1000) { parentBeaconRoot = l1Info.ParentBeaconRoot() if parentBeaconRoot == nil { // default to zero hash if there is no beacon-block-root available parentBeaconRoot = new(common.Hash) } } - return ð.PayloadAttributes{ - Timestamp: hexutil.Uint64(nextL2Time), + pa := ð.PayloadAttributes{ PrevRandao: eth.Bytes32(l1Info.MixDigest()), SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr, Transactions: txs, @@ -178,7 +178,16 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex GasLimit: (*eth.Uint64Quantity)(&sysConfig.GasLimit), Withdrawals: withdrawals, ParentBeaconBlockRoot: parentBeaconRoot, - }, nil + } + + isVoltaTime := ba.rollupCfg.IsVolta(nextL2MilliTime / 1000) + pa.SetMillisecondTimestamp(nextL2MilliTime, isVoltaTime) + if isVoltaTime { + log.Debug("succeed to build payload attributes after fork", + "timestamp_ms", nextL2MilliTime, "seconds-timestamp", pa.Timestamp, + "l1 origin", l1Info.NumberU64(), "l2 parent block", l2Parent.Number) + } + return pa, nil } func (ba *FetchingAttributesBuilder) CachePayloadByHash(payload *eth.ExecutionPayloadEnvelope) bool { diff --git a/op-node/rollup/derive/attributes_queue.go b/op-node/rollup/derive/attributes_queue.go index f9266112b9..3e5218a7f7 100644 --- a/op-node/rollup/derive/attributes_queue.go +++ b/op-node/rollup/derive/attributes_queue.go @@ -88,7 +88,7 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Sing return nil, NewResetError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash)) } // sanity check timestamp - if expected := l2SafeHead.Time + aq.config.BlockTime; expected != batch.Timestamp { + if expected := aq.config.NextMillisecondBlockTime(l2SafeHead.MillisecondTimestamp()); expected != batch.Timestamp { return nil, NewResetError(fmt.Errorf("valid batch has bad timestamp %d, expected %d", batch.Timestamp, expected)) } fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second) diff --git a/op-node/rollup/derive/batch_queue.go b/op-node/rollup/derive/batch_queue.go index 3dbfe20d30..566c6694cc 100644 --- a/op-node/rollup/derive/batch_queue.go +++ b/op-node/rollup/derive/batch_queue.go @@ -96,7 +96,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context, parent eth.L2BlockRef) (*Si if len(bq.nextSpan) > 0 { // There are cached singular batches derived from the span batch. // Check if the next cached batch matches the given parent block. - if bq.nextSpan[0].Timestamp == parent.Time+bq.config.BlockTime { + if bq.nextSpan[0].Timestamp == bq.config.NextMillisecondBlockTime(parent.MillisecondTimestamp()) { // Pop first one and return. nextBatch := bq.popNextBatch(parent) // len(bq.nextSpan) == 0 means it's the last batch of the span. @@ -257,7 +257,7 @@ func (bq *BatchQueue) deriveNextBatch(ctx context.Context, outOfData bool, paren // Find the first-seen batch that matches all validity conditions. // We may not have sufficient information to proceed filtering, and then we stop. 
// There may be none: in that case we force-create an empty batch - nextTimestamp := parent.Time + bq.config.BlockTime + nextMilliTimestamp := bq.config.NextMillisecondBlockTime(parent.MillisecondTimestamp()) var nextBatch *BatchWithL1InclusionBlock // Go over all batches, in order of inclusion, and find the first batch we can accept. @@ -304,7 +304,7 @@ batchLoop: firstOfEpoch := epoch.Number == parent.L1Origin.Number+1 bq.log.Trace("Potentially generating an empty batch", - "expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "nextTimestamp", nextTimestamp, + "expiryEpoch", expiryEpoch, "forceEmptyBatches", forceEmptyBatches, "next_ms_timestamp", nextMilliTimestamp, "epoch_time", epoch.Time, "len_l1_blocks", len(bq.l1Blocks), "firstOfEpoch", firstOfEpoch) if !forceEmptyBatches { @@ -321,20 +321,20 @@ batchLoop: // Fill with empty L2 blocks of the same epoch until we meet the time of the next L1 origin, // to preserve that L2 time >= L1 time. If this is the first block of the epoch, always generate a // batch to ensure that we at least have one batch per epoch. - if nextTimestamp < nextEpoch.Time || firstOfEpoch { - bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp) + if nextMilliTimestamp < nextEpoch.MillisecondTimestamp() || firstOfEpoch { + bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextMilliTimestamp) return &SingularBatch{ ParentHash: parent.Hash, EpochNum: rollup.Epoch(epoch.Number), EpochHash: epoch.Hash, - Timestamp: nextTimestamp, + Timestamp: nextMilliTimestamp, Transactions: nil, }, nil } // At this point we have auto generated every batch for the current epoch // that we can, so we can advance to the next epoch. - bq.log.Trace("Advancing internal L1 blocks", "next_timestamp", nextTimestamp, "next_epoch_time", nextEpoch.Time) + bq.log.Trace("Advancing internal L1 blocks", "next_ms_timestamp", nextMilliTimestamp, "next_epoch_ms_time", nextEpoch.MillisecondTimestamp()) bq.l1Blocks = bq.l1Blocks[1:] return nil, io.EOF } diff --git a/op-node/rollup/derive/batch_queue_test.go b/op-node/rollup/derive/batch_queue_test.go index 6712ae15a6..d0e5af675c 100644 --- a/op-node/rollup/derive/batch_queue_test.go +++ b/op-node/rollup/derive/batch_queue_test.go @@ -55,7 +55,7 @@ func b(chainId *big.Int, timestamp uint64, epoch eth.L1BlockRef) *SingularBatch txData, _ := tx.MarshalBinary() return &SingularBatch{ ParentHash: mockHash(timestamp-2, 2), - Timestamp: timestamp, + Timestamp: timestamp * 1000, EpochNum: rollup.Epoch(epoch.Number), EpochHash: epoch.Hash, Transactions: []hexutil.Bytes{txData}, @@ -101,10 +101,10 @@ func singularBatchToPayload(t *testing.T, batch *SingularBatch, blockNumber uint txs = append(txs, batch.Transactions...) 
return eth.ExecutionPayloadEnvelope{ ExecutionPayload: ð.ExecutionPayload{ - BlockHash: mockHash(batch.Timestamp, 2), + BlockHash: mockHash(batch.Timestamp/1000, 2), ParentHash: batch.ParentHash, BlockNumber: hexutil.Uint64(blockNumber), - Timestamp: hexutil.Uint64(batch.Timestamp), + Timestamp: hexutil.Uint64(batch.Timestamp / 1000), Transactions: txs, }, } @@ -112,7 +112,7 @@ func singularBatchToPayload(t *testing.T, batch *SingularBatch, blockNumber uint func singularBatchToBlockRef(t *testing.T, batch *SingularBatch, blockNumber uint64) eth.L2BlockRef { return eth.L2BlockRef{ - Hash: mockHash(batch.Timestamp, 2), + Hash: mockHash(batch.Timestamp/1000, 2), Number: blockNumber, ParentHash: batch.ParentHash, Time: batch.Timestamp, @@ -242,7 +242,7 @@ func BatchQueueEager(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -295,8 +295,8 @@ func BatchQueueEager(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.SecondBlockInterval() + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -320,7 +320,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 2, DeltaTime: getDeltaTime(batchType), @@ -373,7 +373,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -396,7 +396,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) require.NotNil(t, b) - require.Equal(t, safeHead.Time+2, b.Timestamp) + require.Equal(t, (safeHead.Time+2)*1000, b.Timestamp) require.Equal(t, rollup.Epoch(1), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 @@ -412,7 +412,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { require.Nil(t, e) require.NotNil(t, b) require.Equal(t, rollup.Epoch(2), b.EpochNum) - require.Equal(t, safeHead.Time+2, b.Timestamp) + require.Equal(t, (safeHead.Time+2)*1000, b.Timestamp) safeHead.Number += 1 safeHead.Time += 2 safeHead.Hash = mockHash(b.Timestamp, 2) @@ -432,6 +432,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { Number: 0, ParentHash: common.Hash{}, Time: 10, + MilliTime: 0, L1Origin: l1[0].ID(), SequenceNumber: 0, } @@ -439,7 +440,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 2, DeltaTime: getDeltaTime(batchType), @@ -497,22 +498,22 @@ func BatchQueueMissing(t *testing.T, batchType int) { // Check for a generated batch at t = 12 b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(12)) + require.Equal(t, b.Timestamp, uint64(12000)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) // Check for generated batch at t = 14 b, _, e = 
bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(14)) + require.Equal(t, b.Timestamp, uint64(14000)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) // Check for the inputted batch at t = 16 b, _, e = bq.NextBatch(context.Background(), safeHead) @@ -521,7 +522,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) // Advance the origin. At this point the batch with timestamp 18 will be created input.origin = l1[3] @@ -533,7 +534,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { require.Equal(t, e, io.EOF) b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(18)) + require.Equal(t, b.Timestamp, uint64(18000)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(1), b.EpochNum) } @@ -556,7 +557,7 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -619,8 +620,8 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.SecondBlockInterval() + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -643,7 +644,7 @@ func BatchQueueShuffle(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -718,8 +719,8 @@ func BatchQueueShuffle(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.SecondBlockInterval() + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -741,7 +742,7 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -821,8 +822,8 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) { } else { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.SecondBlockInterval() + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -846,7 +847,7 @@ func TestBatchQueueComplex(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -939,8 +940,8 @@ func TestBatchQueueComplex(t *testing.T) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Time += cfg.SecondBlockInterval() + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } } @@ -964,7 +965,7 @@ func TestBatchQueueResetSpan(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 
10, }, - BlockTime: 2, + BlockTime: 2000, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -997,8 +998,8 @@ func TestBatchQueueResetSpan(t *testing.T) { // This NextBatch() will return the second singular batch. safeHead.Number += 1 - safeHead.Time += cfg.BlockTime - safeHead.Hash = mockHash(nextBatch.Timestamp, 2) + safeHead.Time += cfg.SecondBlockInterval() + safeHead.Hash = mockHash(nextBatch.Timestamp/1000, 2) safeHead.L1Origin = nextBatch.Epoch() nextBatch, _, err = bq.NextBatch(context.Background(), safeHead) require.NoError(t, err) diff --git a/op-node/rollup/derive/batch_test.go b/op-node/rollup/derive/batch_test.go index 3dc554a593..b7b63ea443 100644 --- a/op-node/rollup/derive/batch_test.go +++ b/op-node/rollup/derive/batch_test.go @@ -78,7 +78,7 @@ func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch { func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []*SingularBatch { blockCount := 2 + rng.Intn(128) - l2BlockTime := uint64(2) + l2BlockTime := uint64(2) * 1000 // ms var singularBatches []*SingularBatch for i := 0; i < blockCount; i++ { @@ -87,7 +87,7 @@ func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []* } l1BlockNum := rng.Uint64() // make sure oldest timestamp is large enough - singularBatches[0].Timestamp += 256 + singularBatches[0].Timestamp += 256 * 1000 // ms for i := 0; i < blockCount; i++ { originChangedBit := rng.Intn(2) if originChangedBit == 1 { diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index bde2280745..4f116f27b8 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -67,13 +67,13 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo } epoch := l1Blocks[0] - nextTimestamp := l2SafeHead.Time + cfg.BlockTime - if batch.Timestamp > nextTimestamp { - log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp) + nextMilliTimestamp := cfg.NextMillisecondBlockTime(l2SafeHead.MillisecondTimestamp()) + if batch.Timestamp > nextMilliTimestamp { + log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextMilliTimestamp) return BatchFuture } - if batch.Timestamp < nextTimestamp { - log.Warn("dropping batch with old timestamp", "min_timestamp", nextTimestamp) + if batch.Timestamp < nextMilliTimestamp { + log.Warn("dropping batch with old timestamp", "batch_timestamp", batch.Timestamp, "min_timestamp", nextMilliTimestamp) return BatchDrop } @@ -118,14 +118,14 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchDrop } - if batch.Timestamp < batchOrigin.Time { - log.Warn("batch timestamp is less than L1 origin timestamp", "l2_timestamp", batch.Timestamp, "l1_timestamp", batchOrigin.Time, "origin", batchOrigin.ID()) + if batch.Timestamp < batchOrigin.MillisecondTimestamp() { + log.Warn("batch timestamp is less than L1 origin timestamp", "l2_ms_timestamp", batch.Timestamp, "l1_ms_timestamp", batchOrigin.MillisecondTimestamp(), "origin", batchOrigin.ID()) return BatchDrop } spec := rollup.NewChainSpec(cfg) // Check if we ran out of sequencer time drift - if max := batchOrigin.Time + spec.MaxSequencerDrift(batchOrigin.Time); batch.Timestamp > max { + if max := batchOrigin.MillisecondTimestamp() + spec.MaxSequencerDrift(batchOrigin.Time)*1000; batch.Timestamp > max { if len(batch.Transactions) == 0 { // If the sequencer is co-operating 
by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. @@ -136,7 +136,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo return BatchUndecided } nextOrigin := l1Blocks[1] - if batch.Timestamp >= nextOrigin.Time { // check if the next L1 origin could have been adopted + if batch.Timestamp >= nextOrigin.MillisecondTimestamp() { // check if the next L1 origin could have been adopted log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { @@ -194,13 +194,13 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B return BatchDrop } - nextTimestamp := l2SafeHead.Time + cfg.BlockTime + nextMilliTimestamp := cfg.NextMillisecondBlockTime(l2SafeHead.MillisecondTimestamp()) - if batch.GetTimestamp() > nextTimestamp { - log.Trace("received out-of-order batch for future processing after next batch", "next_timestamp", nextTimestamp) + if batch.GetTimestamp() > nextMilliTimestamp { + log.Trace("received out-of-order batch for future processing after next batch", "next_ms_timestamp", nextMilliTimestamp) return BatchFuture } - if batch.GetBlockTimestamp(batch.GetBlockCount()-1) < nextTimestamp { + if batch.GetBlockTimestamp(batch.GetBlockCount()-1) < nextMilliTimestamp { log.Warn("span batch has no new blocks after safe head") return BatchDrop } @@ -209,18 +209,25 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B // if the span batch does not overlap the current safe chain, parentBLock should be l2SafeHead. parentNum := l2SafeHead.Number parentBlock := l2SafeHead - if batch.GetTimestamp() < nextTimestamp { - if batch.GetTimestamp() > l2SafeHead.Time { + if batch.GetTimestamp() < nextMilliTimestamp { + if batch.GetTimestamp() > l2SafeHead.MillisecondTimestamp() { // batch timestamp cannot be between safe head and next timestamp log.Warn("batch has misaligned timestamp, block time is too short") return BatchDrop } - if (l2SafeHead.Time-batch.GetTimestamp())%cfg.BlockTime != 0 { + if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%rollup.VoltBlockTime != 0 { log.Warn("batch has misaligned timestamp, not overlapped exactly") return BatchDrop } - parentNum = l2SafeHead.Number - (l2SafeHead.Time-batch.GetTimestamp())/cfg.BlockTime - 1 + //parentNum = l2SafeHead.Number - (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())/cfg.MillisecondBlockInterval() - 1 var err error + parentNum, err = cfg.TargetBlockNumber(batch.GetTimestamp()) + if err != nil { + log.Warn("failed to computer batch parent number", "batch_ms_time", batch.GetTimestamp(), "err", err) + // unable to validate the batch for now. retry later. 
+ return BatchUndecided + } + parentNum = parentNum - 1 parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum) if err != nil { log.Warn("failed to fetch L2 block", "number", parentNum, "err", err) @@ -272,7 +279,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B originAdvanced := startEpochNum == parentBlock.L1Origin.Number+1 for i := 0; i < batch.GetBlockCount(); i++ { - if batch.GetBlockTimestamp(i) <= l2SafeHead.Time { + if batch.GetBlockTimestamp(i) <= l2SafeHead.MillisecondTimestamp() { continue } var l1Origin eth.L1BlockRef @@ -290,14 +297,14 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B } } blockTimestamp := batch.GetBlockTimestamp(i) - if blockTimestamp < l1Origin.Time { + if blockTimestamp < l1Origin.MillisecondTimestamp() { log.Warn("block timestamp is less than L1 origin timestamp", "l2_timestamp", blockTimestamp, "l1_timestamp", l1Origin.Time, "origin", l1Origin.ID()) return BatchDrop } spec := rollup.NewChainSpec(cfg) // Check if we ran out of sequencer time drift - if max := l1Origin.Time + spec.MaxSequencerDrift(l1Origin.Time); blockTimestamp > max { + if max := l1Origin.MillisecondTimestamp() + spec.MaxSequencerDrift(l1Origin.Time)*1000; blockTimestamp > max { if len(batch.GetBlockTransactions(i)) == 0 { // If the sequencer is co-operating by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. @@ -307,7 +314,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B log.Info("without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid") return BatchUndecided } - if blockTimestamp >= l1Blocks[originIdx+1].Time { // check if the next L1 origin could have been adopted + if blockTimestamp >= l1Blocks[originIdx+1].MillisecondTimestamp() { // check if the next L1 origin could have been adopted log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { @@ -335,7 +342,7 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B } // Check overlapped blocks - if batch.GetTimestamp() < nextTimestamp { + if batch.GetTimestamp() < nextMilliTimestamp { for i := uint64(0); i < l2SafeHead.Number-parentNum; i++ { safeBlockNum := parentNum + i + 1 safeBlockPayload, err := l2Fetcher.PayloadByNumber(ctx, safeBlockNum) diff --git a/op-node/rollup/derive/batches_test.go b/op-node/rollup/derive/batches_test.go index 125fc0f02e..b3eb4309c3 100644 --- a/op-node/rollup/derive/batches_test.go +++ b/op-node/rollup/derive/batches_test.go @@ -63,7 +63,7 @@ func TestValidBatch(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 31, // a genesis time that itself does not align to make it more interesting }, - BlockTime: defaultBlockTime, + BlockTime: defaultBlockTime * 1000, SeqWindowSize: 4, MaxSequencerDrift: 6, } @@ -241,7 +241,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, @@ -257,7 +257,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time + 1, // 1 too high + Timestamp: (l2A1.Time + 1) * 1000, // 1 too high Transactions: nil, }, }, @@ -273,13 +273,13 @@ 
func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A0.Time, // repeating the same time + Timestamp: l2A0.MillisecondTimestamp(), // repeating the same time Transactions: nil, }, }, Expected: BatchDrop, }, - { + { // TODO: Name: "misaligned timestamp", L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, L2SafeHead: l2A0, @@ -289,7 +289,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low + Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 too low Transactions: nil, }, }, @@ -305,7 +305,7 @@ func TestValidBatch(t *testing.T) { ParentHash: testutils.RandomHash(rng), EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, @@ -321,7 +321,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, @@ -337,7 +337,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, // build on top of safe head to continue EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get too epoch check + Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check Transactions: nil, }, }, @@ -353,7 +353,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, @@ -369,7 +369,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C EpochHash: l1C.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, @@ -385,7 +385,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, @@ -401,7 +401,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, }, }, @@ -417,7 +417,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{[]byte("sequencer should include this tx")}, }, }, @@ -434,7 +434,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, }, }, 
@@ -450,7 +450,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, @@ -466,7 +466,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: nil, }, }, @@ -482,7 +482,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, @@ -498,7 +498,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, @@ -514,7 +514,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{}, // empty tx data }, @@ -532,7 +532,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{types.DepositTxType, 0}, // piece of data alike to a deposit }, @@ -550,7 +550,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{0x02, 0x42, 0x13, 0x37}, []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, @@ -569,7 +569,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{0x02, 0x42, 0x13, 0x37}, []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, @@ -588,7 +588,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2A2.Time + defaultBlockTime, + Timestamp: l2A2.MillisecondTimestamp() + defaultBlockTime*1000, Transactions: nil, }, }, @@ -607,7 +607,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -627,7 +627,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time + 1, // 1 too high + Timestamp: (l2A1.Time + 1) * 1000, // 1 too high Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -647,7 +647,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low + Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 
too low Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -667,7 +667,7 @@ func TestValidBatch(t *testing.T) { ParentHash: testutils.RandomHash(rng), EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -687,7 +687,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -707,13 +707,13 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, // build on top of safe head to continue EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2B0.Time + defaultBlockTime, // pass the timestamp check to get too epoch check + Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check Transactions: nil, }, { EpochNum: rollup.Epoch(l1B.Number), EpochHash: l1B.Hash, // pass the l1 origin check - Timestamp: l2B0.Time + defaultBlockTime*2, + Timestamp: (l2B0.Time + defaultBlockTime*2) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -733,7 +733,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -753,14 +753,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -780,7 +780,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C EpochHash: l1C.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -800,7 +800,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -820,14 +820,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l1A.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -847,7 +847,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -867,7 +867,7 @@ func TestValidBatch(t *testing.T) { ParentHash: 
l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -886,14 +886,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -913,7 +913,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -933,7 +933,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -952,14 +952,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2Y0.ParentHash, EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: nil, }, { ParentHash: l2Z0.ParentHash, EpochNum: rollup.Epoch(l2Z0.L1Origin.Number), EpochHash: l2Z0.L1Origin.Hash, - Timestamp: l2Z0.Time, // valid, but more than 6 ahead of l1Y.Time + Timestamp: l2Z0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -979,7 +979,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -999,14 +999,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1026,7 +1026,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1046,14 +1046,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A3.ParentHash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { // we build l2A4, which has a 
timestamp of 2*4 = 8 higher than l2A0 ParentHash: l2A4.ParentHash, EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.Time, + Timestamp: l2A4.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1073,7 +1073,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{}, // empty tx data }, @@ -1095,7 +1095,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{ []byte{types.DepositTxType, 0}, // piece of data alike to a deposit }, @@ -1117,7 +1117,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -1136,7 +1136,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.ParentHash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -1155,7 +1155,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2A2.Time + defaultBlockTime, + Timestamp: (l2A2.Time + defaultBlockTime) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1175,14 +1175,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { // we build l2B0, which starts a new epoch too early ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2A2.Time + defaultBlockTime, + Timestamp: (l2A2.Time + defaultBlockTime) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1202,14 +1202,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1228,21 +1228,21 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1261,14 +1261,14 @@ func TestValidBatch(t *testing.T) { ParentHash: 
l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1288,14 +1288,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1315,14 +1315,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number) + 1, EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1342,14 +1342,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1369,21 +1369,21 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.ParentHash, EpochNum: rollup.Epoch(l2A0.L1Origin.Number), EpochHash: l2A0.L1Origin.Hash, - Timestamp: l2A0.Time, + Timestamp: l2A0.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.Time, + Timestamp: l2A2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1403,14 +1403,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A0.Time + 1, + Timestamp: (l2A0.Time + 1) * 1000, Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A1.Time + 1, + Timestamp: (l2A1.Time + 1) * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1430,14 +1430,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A0.Hash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A0.Time - 1, + Timestamp: (l2A0.Time - 1) * 1000, Transactions: nil, }, { ParentHash: l2A1.Hash, EpochNum: rollup.Epoch(l2A2.L1Origin.Number), EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1457,14 +1457,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A2.Hash, EpochNum: rollup.Epoch(l2A3.L1Origin.Number), 
EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.Time, + Timestamp: l2A3.MillisecondTimestamp(), Transactions: nil, }, { ParentHash: l2A3.Hash, EpochNum: rollup.Epoch(l2B0.L1Origin.Number), EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.Time, + Timestamp: l2B0.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1483,7 +1483,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, @@ -1501,7 +1501,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -1520,7 +1520,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, @@ -1538,7 +1538,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l2A1.ParentHash, EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.Time, + Timestamp: l2A1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, }, uint64(0), big.NewInt(0)), @@ -1582,6 +1582,7 @@ func TestValidBatch(t *testing.T) { if mod := testCase.ConfigMod; mod != nil { mod(rcfg) } + // TODO validity := CheckBatch(ctx, rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client) require.Equal(t, testCase.Expected, validity, "batch check must return expected validity level") if expLog := testCase.ExpectedLog; expLog != "" { @@ -1644,14 +1645,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochHash: l2B1.L1Origin.Hash, - Timestamp: l2B1.Time, + Timestamp: l2B1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, // Random generated TX that does not match overlapping block }, { ParentHash: l2B1.Hash, EpochNum: rollup.Epoch(l2B2.L1Origin.Number), EpochHash: l2B2.L1Origin.Hash, - Timestamp: l2B2.Time, + Timestamp: l2B2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1689,14 +1690,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochHash: l2B1.L1Origin.Hash, - Timestamp: l2B1.Time, + Timestamp: l2B1.MillisecondTimestamp(), Transactions: []hexutil.Bytes{randTxData}, }, { ParentHash: l2B1.Hash, EpochNum: rollup.Epoch(l2B2.L1Origin.Number), EpochHash: l2B2.L1Origin.Hash, - Timestamp: l2B2.Time, + Timestamp: l2B2.MillisecondTimestamp(), Transactions: nil, }, }, uint64(0), big.NewInt(0)), diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index f7dde867bc..959f767ac2 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -105,7 +105,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { // This is just for early dropping invalid batches as soon as possible. 
return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time)) } - batch.Batch, err = DeriveSpanBatch(batchData, cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) + // double check + // temp VoltBlockTime + batch.Batch, err = DeriveSpanBatch(batchData, cr.cfg, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) if err != nil { return nil, err } diff --git a/op-node/rollup/derive/channel_out.go b/op-node/rollup/derive/channel_out.go index 2142796bab..da48d54177 100644 --- a/op-node/rollup/derive/channel_out.go +++ b/op-node/rollup/derive/channel_out.go @@ -8,8 +8,11 @@ import ( "io" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -54,7 +57,7 @@ type ChannelOut interface { ID() ChannelID Reset() error AddBlock(*rollup.Config, *types.Block) error - AddSingularBatch(*SingularBatch, uint64) error + AddSingularBatch(*rollup.Config, *SingularBatch, uint64) error InputBytes() int ReadyBytes() int Flush() error @@ -117,7 +120,7 @@ func (co *SingularChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Bl if err != nil { return err } - return co.AddSingularBatch(batch, l1Info.SequenceNumber) + return co.AddSingularBatch(rollupCfg, batch, l1Info.SequenceNumber) } // AddSingularBatch adds a batch to the channel. It returns the RLP encoded byte size @@ -128,7 +131,7 @@ func (co *SingularChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Bl // AddSingularBatch should be used together with BlockToBatch if you need to access the // BatchData before adding a block to the channel. It isn't possible to access // the batch data with AddBlock. 
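// Illustrative sketch, not part of the diff: with the ChannelOut interface change above,
// any code that feeds batches into a channel now has to thread the rollup config through,
// so timestamp handling can respect the Volta fork. addBatches is an illustrative helper,
// not an existing function.
func addBatches(co ChannelOut, cfg *rollup.Config, seqNum uint64, batches []*SingularBatch) error {
	for _, b := range batches {
		// AddSingularBatch(cfg, batch, seqNum) replaces the old AddSingularBatch(batch, seqNum).
		if err := co.AddSingularBatch(cfg, b, seqNum); err != nil {
			return fmt.Errorf("adding singular batch with timestamp %d: %w", b.Timestamp, err)
		}
	}
	return nil
}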
-func (co *SingularChannelOut) AddSingularBatch(batch *SingularBatch, _ uint64) error { +func (co *SingularChannelOut) AddSingularBatch(cfg *rollup.Config, batch *SingularBatch, _ uint64) error { if co.closed { return ErrChannelOutAlreadyClosed } @@ -234,16 +237,31 @@ func BlockToSingularBatch(rollupCfg *rollup.Config, block *types.Block) (*Singul if l1InfoTx.Type() != types.DepositTxType { return nil, nil, ErrNotDepositTx } - l1Info, err := L1BlockInfoFromBytes(rollupCfg, block.Time(), l1InfoTx.Data()) + l1Info, err := L1BlockInfoFromBytes(rollupCfg, block.Time() /*second timestamp for fork*/, l1InfoTx.Data()) if err != nil { return nil, l1Info, fmt.Errorf("could not parse the L1 Info deposit: %w", err) } + ts := uint64(0) + isVolta := rollupCfg.IsVolta(block.Time()) + if isVolta { // after volta fork + milliPart := uint64(0) + if block.MixDigest() != (common.Hash{}) { + milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) + } + ts = block.Time()*1000 + milliPart + log.Debug("succeed to transform singular batch after fork", + "timestamp_ms", milliPart, "seconds-timestamp", block.Time(), + "l2 block", block.Number(), "l1 origin", l1Info.Number) + } else { // before volta fork + ts = block.Time() + } + return &SingularBatch{ ParentHash: block.ParentHash(), EpochNum: rollup.Epoch(l1Info.Number), EpochHash: l1Info.BlockHash, - Timestamp: block.Time(), + Timestamp: ts, Transactions: opaqueTxs, }, l1Info, nil } diff --git a/op-node/rollup/derive/engine_queue.go b/op-node/rollup/derive/engine_queue.go index c89456585f..bfce6dc8ff 100644 --- a/op-node/rollup/derive/engine_queue.go +++ b/op-node/rollup/derive/engine_queue.go @@ -284,9 +284,9 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System if err != nil { return NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %v; err: %w", safe.L1Origin, err)) } - if safe.Time < l1Origin.Time { + if safe.MillisecondTimestamp() < l1Origin.MillisecondTimestamp() { return NewResetError(fmt.Errorf("cannot reset block derivation to start at L2 block %s with time %d older than its L1 origin %s with time %d, time invariant is broken", - safe, safe.Time, l1Origin, l1Origin.Time)) + safe, safe.MillisecondTimestamp(), l1Origin, l1Origin.MillisecondTimestamp())) } // Walk back L2 chain to find the L1 origin that is old enough to start buffering channel data from. 
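// Illustrative sketch, not part of the diff: the convention used above stores the sub-second
// part of an L2 timestamp in the two highest bytes of MixDigest. These helpers just name the
// inline arithmetic (uint64(b[0])*256 + uint64(b[1]), and seconds*1000 + milliPart); the
// function names are illustrative only.
func milliPartFromMixDigest(mix common.Hash) uint64 {
	if mix == (common.Hash{}) {
		return 0
	}
	// byte 0 is the more significant of the two-byte milli part
	return uint64(mix[0])*256 + uint64(mix[1])
}

func millisecondTimestamp(secondTimestamp uint64, mix common.Hash) uint64 {
	return secondTimestamp*1000 + milliPartFromMixDigest(mix)
}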
diff --git a/op-node/rollup/derive/engine_update.go b/op-node/rollup/derive/engine_update.go index 656e52474b..4b067165b7 100644 --- a/op-node/rollup/derive/engine_update.go +++ b/op-node/rollup/derive/engine_update.go @@ -203,7 +203,7 @@ func confirmPayload( } metrics.RecordSequencerStepTime("forkChoiceUpdateHeads", time.Since(start)) log.Info("inserted block", "hash", payload.BlockHash, "number", uint64(payload.BlockNumber), - "state_root", payload.StateRoot, "timestamp", uint64(payload.Timestamp), "parent", payload.ParentHash, + "state_root", payload.StateRoot, "timestamp_ms", payload.MillisecondTimestamp(), "parent", payload.ParentHash, "prev_randao", payload.PrevRandao, "fee_recipient", payload.FeeRecipient, "txs", len(payload.Transactions), "update_safe", updateSafe) return envelope, BlockInsertOK, nil diff --git a/op-node/rollup/derive/l1_block_info.go b/op-node/rollup/derive/l1_block_info.go index 93c529750e..7a665d3ba4 100644 --- a/op-node/rollup/derive/l1_block_info.go +++ b/op-node/rollup/derive/l1_block_info.go @@ -260,7 +260,7 @@ func L1BlockInfoFromBytes(rollupCfg *rollup.Config, l2BlockTime uint64, data []b // L1InfoDeposit creates a L1 Info deposit transaction based on the L1 block, // and the L2 block-height difference with the start of the epoch. -func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, block eth.BlockInfo, l2BlockTime uint64) (*types.DepositTx, error) { +func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, block eth.BlockInfo, nextL2MilliTime uint64) (*types.DepositTx, error) { l1BlockInfo := L1BlockInfo{ Number: block.NumberU64(), Time: block.Time(), @@ -270,7 +270,7 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber BatcherAddr: sysCfg.BatcherAddr, } var data []byte - if isEcotoneButNotFirstBlock(rollupCfg, l2BlockTime) { + if isEcotoneButNotFirstBlock(rollupCfg, nextL2MilliTime/1000) { l1BlockInfo.BlobBaseFee = block.BlobBaseFee() if l1BlockInfo.BlobBaseFee == nil { // The L2 spec states to use the MIN_BLOB_GASPRICE from EIP-4844 if not yet active on L1. @@ -314,7 +314,7 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber Data: data, } // With the regolith fork we disable the IsSystemTx functionality, and allocate real gas - if rollupCfg.IsRegolith(l2BlockTime) { + if rollupCfg.IsRegolith(nextL2MilliTime / 1000) { out.IsSystemTransaction = false out.Gas = RegolithSystemTxGas } @@ -322,8 +322,8 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber } // L1InfoDepositBytes returns a serialized L1-info attributes transaction. 
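// Illustrative sketch, not part of the diff: L1InfoDeposit now receives the next L2 timestamp
// in milliseconds, while the fork predicates (Regolith, Ecotone, ...) keep second-granularity
// activation times, hence the nextL2MilliTime/1000 conversions above. The same check in a
// generic, illustrative form:
func forkActiveAtMilli(forkTimeSec *uint64, l2MilliTime uint64) bool {
	return forkTimeSec != nil && l2MilliTime/1000 >= *forkTimeSec
}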
-func L1InfoDepositBytes(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, l1Info eth.BlockInfo, l2BlockTime uint64) ([]byte, error) { - dep, err := L1InfoDeposit(rollupCfg, sysCfg, seqNumber, l1Info, l2BlockTime) +func L1InfoDepositBytes(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, l1Info eth.BlockInfo, nextL2MilliTime uint64) ([]byte, error) { + dep, err := L1InfoDeposit(rollupCfg, sysCfg, seqNumber, l1Info, nextL2MilliTime) if err != nil { return nil, fmt.Errorf("failed to create L1 info tx: %w", err) } diff --git a/op-node/rollup/derive/l1_block_info_test.go b/op-node/rollup/derive/l1_block_info_test.go index e5c9253ce1..b87c9108d6 100644 --- a/op-node/rollup/derive/l1_block_info_test.go +++ b/op-node/rollup/derive/l1_block_info_test.go @@ -139,9 +139,9 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg := rollup.Config{ RegolithTime: &zero, EcotoneTime: &zero, - BlockTime: 2, + BlockTime: 2000, } - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 2) + depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 2000) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -154,7 +154,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg := rollup.Config{ RegolithTime: &zero, EcotoneTime: &zero, - BlockTime: 2, + BlockTime: 2000, } depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0) require.NoError(t, err) diff --git a/op-node/rollup/derive/l1_traversal.go b/op-node/rollup/derive/l1_traversal.go index 84d2fbc970..be87cf11c5 100644 --- a/op-node/rollup/derive/l1_traversal.go +++ b/op-node/rollup/derive/l1_traversal.go @@ -76,6 +76,8 @@ func (l1t *L1Traversal) AdvanceL1Block(ctx context.Context) error { if err != nil { return NewTemporaryError(fmt.Errorf("failed to fetch receipts of L1 block %s (parent: %s) for L1 sysCfg update: %w", nextL1Origin, origin, err)) } + + // TODO: may need to pass l1origin milli-timestamp later if IsEcotone() use the milli-timestamp if err := UpdateSystemConfigWithL1Receipts(&l1t.sysCfg, receipts, l1t.cfg, nextL1Origin.Time); err != nil { // the sysCfg changes should always be formatted correctly. return NewCriticalError(fmt.Errorf("failed to update L1 sysCfg with receipts from block %s: %w", nextL1Origin, err)) diff --git a/op-node/rollup/derive/l2block_util.go b/op-node/rollup/derive/l2block_util.go index 5946247c7d..6d96478ed9 100644 --- a/op-node/rollup/derive/l2block_util.go +++ b/op-node/rollup/derive/l2block_util.go @@ -3,11 +3,10 @@ package derive import ( "fmt" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) // L2BlockRefSource is a source for the generation of a L2BlockRef. E.g. a @@ -18,6 +17,7 @@ import ( type L2BlockRefSource interface { Hash() common.Hash ParentHash() common.Hash + MixDigest() common.Hash // millisecond part NumberU64() uint64 Time() uint64 Transactions() types.Transactions @@ -54,11 +54,18 @@ func L2BlockToBlockRef(rollupCfg *rollup.Config, block L2BlockRefSource) (eth.L2 sequenceNumber = info.SequenceNumber } + milliPart := uint64(0) + if block.MixDigest() != (common.Hash{}) { + // adapts l2 millisecond, highest 2 bytes as milli-part. 
+ milliPart = uint64(eth.Bytes32(block.MixDigest())[0])*256 + uint64(eth.Bytes32(block.MixDigest())[1]) + } + return eth.L2BlockRef{ Hash: hash, Number: number, ParentHash: block.ParentHash(), Time: block.Time(), + MilliTime: milliPart, L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil diff --git a/op-node/rollup/derive/payload_util.go b/op-node/rollup/derive/payload_util.go index 06a3a5a7f7..17feb82994 100644 --- a/op-node/rollup/derive/payload_util.go +++ b/op-node/rollup/derive/payload_util.go @@ -4,10 +4,10 @@ import ( "encoding/binary" "fmt" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" ) // PayloadToBlockRef extracts the essential L2BlockRef information from an execution payload, @@ -33,7 +33,7 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) if tx.Type() != types.DepositTxType { return eth.L2BlockRef{}, fmt.Errorf("first payload tx has unexpected tx type: %d", tx.Type()) } - info, err := L1BlockInfoFromBytes(rollupCfg, uint64(payload.Timestamp), tx.Data()) + info, err := L1BlockInfoFromBytes(rollupCfg, uint64(payload.Timestamp) /* second timestamp for fork*/, tx.Data()) if err != nil { return eth.L2BlockRef{}, fmt.Errorf("failed to parse L1 info deposit tx from L2 block: %w", err) } @@ -41,11 +41,18 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) sequenceNumber = info.SequenceNumber } + // adapts millisecond part + milliPart := uint64(0) + milliPart = uint64(payload.PrevRandao[0])*256 + uint64(payload.PrevRandao[1]) + log.Debug("generate l2 block ref by payload", "timestamp_ms", milliPart, + "payload timestamp", payload.Timestamp, "block num", payload.BlockNumber) + return eth.L2BlockRef{ Hash: payload.BlockHash, Number: uint64(payload.BlockNumber), ParentHash: payload.ParentHash, Time: uint64(payload.Timestamp), + MilliTime: milliPart, L1Origin: l1Origin, SequenceNumber: sequenceNumber, }, nil diff --git a/op-node/rollup/derive/singular_batch.go b/op-node/rollup/derive/singular_batch.go index fdb867efbe..4d2a033cc9 100644 --- a/op-node/rollup/derive/singular_batch.go +++ b/op-node/rollup/derive/singular_batch.go @@ -23,7 +23,7 @@ type SingularBatch struct { ParentHash common.Hash // parent L2 block hash EpochNum rollup.Epoch // aka l1 num EpochHash common.Hash // l1 block hash - Timestamp uint64 + Timestamp uint64 // millisecond Transactions []hexutil.Bytes } diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index aa95b3838a..cbef43407d 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -32,7 +32,7 @@ var ErrTooBigSpanBatchSize = errors.New("span batch size limit reached") var ErrEmptySpanBatch = errors.New("span-batch must not be empty") type spanBatchPrefix struct { - relTimestamp uint64 // Relative timestamp of the first block + relTimestamp uint64 // Relative timestamp of the first block, millisecond l1OriginNum uint64 // L1 origin number parentCheck [20]byte // First 20 bytes of the first block's parent hash l1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash @@ -340,7 +340,7 @@ func (b *RawSpanBatch) encode(w io.Writer) error { // derive converts RawSpanBatch into SpanBatch, which has a list of SpanBatchElement. // We need chain config constants to derive values for making payload attributes. 
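// Illustrative sketch, not part of the diff: for execution payloads the sub-second part is
// read from the first two bytes of PrevRandao, mirroring the MixDigest convention used for
// blocks above. The helper name is illustrative only.
func milliPartFromPrevRandao(prevRandao eth.Bytes32) uint64 {
	return uint64(prevRandao[0])*256 + uint64(prevRandao[1])
}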
-func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { +func (b *RawSpanBatch) derive(rollupCfg *rollup.Config, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { if b.blockCount == 0 { return nil, ErrEmptySpanBatch } @@ -361,6 +361,20 @@ func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.I return nil, err } + var blockInterval uint64 + var millisecondTimestamp bool + if rollupCfg.VoltaTime != nil && *rollupCfg.VoltaTime > genesisTimestamp { + secondSinceVolta := *rollupCfg.VoltaTime - genesisTimestamp + if b.relTimestamp >= secondSinceVolta { + blockInterval = rollup.VoltBlockTime + millisecondTimestamp = true + } else { + blockInterval = rollup.BeforeVoltBlockTime + } + } else { + blockInterval = rollup.BeforeVoltBlockTime + } + spanBatch := SpanBatch{ ParentCheck: b.parentCheck, L1OriginCheck: b.l1OriginCheck, @@ -368,7 +382,13 @@ func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.I txIdx := 0 for i := 0; i < int(b.blockCount); i++ { batch := SpanBatchElement{} - batch.Timestamp = genesisTimestamp + b.relTimestamp + blockTime*uint64(i) + if millisecondTimestamp { + // relTimestamp and blockInterval has changed to millisecond + batch.Timestamp = genesisTimestamp*1000 + b.relTimestamp + blockInterval*uint64(i) + } else { + // relTimestamp is second timestamp before volta + batch.Timestamp = genesisTimestamp*1000 + b.relTimestamp*1000 + blockInterval*uint64(i) + } batch.EpochNum = rollup.Epoch(blockOriginNums[i]) for j := 0; j < int(b.blockTxCounts[i]); j++ { batch.Transactions = append(batch.Transactions, fullTxs[txIdx]) @@ -376,13 +396,17 @@ func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.I } spanBatch.Batches = append(spanBatch.Batches, &batch) } + if millisecondTimestamp { + log.Debug("succeed to build span batch with milliseconds timestamp", "rel timestamp", b.relTimestamp, + "first l1 origin", spanBatch.GetStartEpochNum(), "block count", spanBatch.GetBlockCount()) + } return &spanBatch, nil } // ToSpanBatch converts RawSpanBatch to SpanBatch, // which implements a wrapper of derive method of RawSpanBatch -func (b *RawSpanBatch) ToSpanBatch(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { - spanBatch, err := b.derive(blockTime, genesisTimestamp, chainID) +func (b *RawSpanBatch) ToSpanBatch(rollupCfg *rollup.Config, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { + spanBatch, err := b.derive(rollupCfg, genesisTimestamp, chainID) if err != nil { return nil, err } @@ -402,7 +426,7 @@ type SpanBatchElement struct { func singularBatchToElement(singularBatch *SingularBatch) *SpanBatchElement { return &SpanBatchElement{ EpochNum: singularBatch.EpochNum, - Timestamp: singularBatch.Timestamp, + Timestamp: singularBatch.Timestamp, // ms Transactions: singularBatch.Transactions, } } @@ -548,16 +572,28 @@ func (b *SpanBatch) AppendSingularBatch(singularBatch *SingularBatch, seqNum uin } // ToRawSpanBatch merges SingularBatch List and initialize single RawSpanBatch -func (b *SpanBatch) ToRawSpanBatch() (*RawSpanBatch, error) { +func (b *SpanBatch) ToRawSpanBatch(cfg *rollup.Config) (*RawSpanBatch, error) { if len(b.Batches) == 0 { return nil, errors.New("cannot merge empty singularBatch list") } span_start := b.Batches[0] span_end := b.Batches[len(b.Batches)-1] + relTs := uint64(0) + if cfg.IsVolta(span_start.Timestamp) { + relTs = span_start.Timestamp - b.MillisecondGenesisTimestamp() + } else { + relTs 
= span_start.Timestamp - b.GenesisTimestamp + } + log.Debug("succeed to make raw span_batch", + "span_start_timestamp", span_start.Timestamp, + "rel_timestamp", relTs, + "genesis_timestamp", b.GenesisTimestamp, + "is_volta", cfg.IsVolta(span_start.Timestamp)) + return &RawSpanBatch{ spanBatchPrefix: spanBatchPrefix{ - relTimestamp: span_start.Timestamp - b.GenesisTimestamp, + relTimestamp: relTs, l1OriginNum: uint64(span_end.EpochNum), parentCheck: b.ParentCheck, l1OriginCheck: b.L1OriginCheck, @@ -570,6 +606,9 @@ func (b *SpanBatch) ToRawSpanBatch() (*RawSpanBatch, error) { }, }, nil } +func (b *SpanBatch) MillisecondGenesisTimestamp() uint64 { + return b.GenesisTimestamp * 1000 +} // GetSingularBatches converts SpanBatchElements after L2 safe head to SingularBatches. // Since SpanBatchElement does not contain EpochHash, set EpochHash from the given L1 blocks. @@ -578,7 +617,7 @@ func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead et var singularBatches []*SingularBatch originIdx := 0 for _, batch := range b.Batches { - if batch.Timestamp <= l2SafeHead.Time { + if batch.Timestamp <= l2SafeHead.MillisecondTimestamp() { continue } singularBatch := SingularBatch{ @@ -616,13 +655,13 @@ func NewSpanBatch(genesisTimestamp uint64, chainID *big.Int) *SpanBatch { } // DeriveSpanBatch derives SpanBatch from BatchData. -func DeriveSpanBatch(batchData *BatchData, blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { +func DeriveSpanBatch(batchData *BatchData, rollupCfg *rollup.Config, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { rawSpanBatch, ok := batchData.inner.(*RawSpanBatch) if !ok { return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch")) } // If the batch type is Span batch, derive block inputs from RawSpanBatch. 
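// Illustrative sketch, not part of the diff: how a span-batch element timestamp is rebuilt
// from the relative timestamp, mirroring RawSpanBatch.derive above. Before Volta the
// relative timestamp is in seconds and the interval is BeforeVoltBlockTime (1000ms); after
// Volta both are in milliseconds and the interval is VoltBlockTime (500ms). The function
// name and parameters are illustrative only.
func elementTimestampMs(genesisSec, relTimestamp, blockIndex uint64, postVolta bool) uint64 {
	if postVolta {
		return genesisSec*1000 + relTimestamp + rollup.VoltBlockTime*blockIndex
	}
	return genesisSec*1000 + relTimestamp*1000 + rollup.BeforeVoltBlockTime*blockIndex
}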
- return rawSpanBatch.ToSpanBatch(blockTime, genesisTimestamp, chainID) + return rawSpanBatch.ToSpanBatch(rollupCfg, genesisTimestamp, chainID) } // ReadTxData reads raw RLP tx data from reader and returns txData and txType diff --git a/op-node/rollup/derive/span_batch_test.go b/op-node/rollup/derive/span_batch_test.go index 4c02c46b2d..8c39696bfb 100644 --- a/op-node/rollup/derive/span_batch_test.go +++ b/op-node/rollup/derive/span_batch_test.go @@ -337,13 +337,13 @@ func TestSpanBatchDerive(t *testing.T) { rng := rand.New(rand.NewSource(0xbab0bab0)) chainID := new(big.Int).SetUint64(rng.Uint64()) - l2BlockTime := uint64(2) + l2BlockTime := uint64(2) * 1000 //ms for originChangedBit := 0; originChangedBit < 2; originChangedBit++ { singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID) safeL2Head := testutils.RandomL2BlockRef(rng) safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:]) - genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128 + genesisTimeStamp := 1 + singularBatches[0].Timestamp/1000 - 128 // second spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID) // set originChangedBit to match the original test implementation @@ -408,7 +408,7 @@ func TestSpanBatchMerge(t *testing.T) { require.NoError(t, err) // check span batch prefix - require.Equal(t, rawSpanBatch.relTimestamp, singularBatches[0].Timestamp-genesisTimeStamp, "invalid relative timestamp") + require.Equal(t, rawSpanBatch.relTimestamp, singularBatches[0].Timestamp-genesisTimeStamp*1000, "invalid relative timestamp") require.Equal(t, rollup.Epoch(rawSpanBatch.l1OriginNum), singularBatches[blockCount-1].EpochNum) require.Equal(t, rawSpanBatch.parentCheck[:], singularBatches[0].ParentHash.Bytes()[:20], "invalid parent check") require.Equal(t, rawSpanBatch.l1OriginCheck[:], singularBatches[blockCount-1].EpochHash.Bytes()[:20], "invalid l1 origin check") @@ -444,8 +444,8 @@ func TestSpanBatchToSingularBatch(t *testing.T) { singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID) safeL2Head := testutils.RandomL2BlockRef(rng) safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:]) - safeL2Head.Time = singularBatches[0].Timestamp - 2 - genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128 + safeL2Head.Time = singularBatches[0].Timestamp/1000 - 2 + genesisTimeStamp := 1 + singularBatches[0].Timestamp/1000 - 128 // second spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID) // set originChangedBit to match the original test implementation diff --git a/op-node/rollup/derive/span_channel_out.go b/op-node/rollup/derive/span_channel_out.go index 8e02b55378..55afd2f4f6 100644 --- a/op-node/rollup/derive/span_channel_out.go +++ b/op-node/rollup/derive/span_channel_out.go @@ -2,7 +2,6 @@ package derive import ( "bytes" - "crypto/rand" "fmt" "io" @@ -108,14 +107,14 @@ func (co *SpanChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Block) if err != nil { return err } - return co.AddSingularBatch(batch, l1Info.SequenceNumber) + return co.AddSingularBatch(rollupCfg, batch, l1Info.SequenceNumber) } // AddSingularBatch adds a SingularBatch to the channel, compressing the data if necessary. 
// if the new batch would make the channel exceed the target size, the last batch is reverted, // and the compression happens on the previous RLP buffer instead // if the input is too small to need compression, data is accumulated but not compressed -func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) error { +func (co *SpanChannelOut) AddSingularBatch(cfg *rollup.Config, batch *SingularBatch, seqNum uint64) error { // sentinel error for closed or full channel if co.closed { return ErrChannelOutAlreadyClosed @@ -129,7 +128,7 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) return fmt.Errorf("failed to append SingularBatch to SpanBatch: %w", err) } // convert Span batch to RawSpanBatch - rawSpanBatch, err := co.spanBatch.ToRawSpanBatch() + rawSpanBatch, err := co.spanBatch.ToRawSpanBatch(cfg) if err != nil { return fmt.Errorf("failed to convert SpanBatch into RawSpanBatch: %w", err) } diff --git a/op-node/rollup/driver/metered_engine.go b/op-node/rollup/driver/metered_engine.go index 29f2c7e4c8..01b5fc4f46 100644 --- a/op-node/rollup/driver/metered_engine.go +++ b/op-node/rollup/driver/metered_engine.go @@ -74,7 +74,7 @@ func (m *MeteredEngine) ConfirmPayload(ctx context.Context, agossip async.AsyncG sealTime := now.Sub(sealingStart) buildTime := now.Sub(m.buildingStartTime) m.metrics.RecordSequencerSealingTime(sealTime) - m.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(m.cfg.BlockTime)*time.Second) + m.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(rollup.VoltBlockTime)*time.Millisecond) txnCount := len(payload.ExecutionPayload.Transactions) m.metrics.CountSequencedTxs(txnCount) diff --git a/op-node/rollup/driver/origin_selector.go b/op-node/rollup/driver/origin_selector.go index 8f4ecf7746..525bed5046 100644 --- a/op-node/rollup/driver/origin_selector.go +++ b/op-node/rollup/driver/origin_selector.go @@ -46,18 +46,23 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc if err != nil { return eth.L1BlockRef{}, err } - msd := los.spec.MaxSequencerDrift(currentOrigin.Time) + // TODO: may need to pass l1origin milli-timestamp later if IsFjord() use the milli-timestamp + msd := los.spec.MaxSequencerDrift(currentOrigin.Time) * 1000 // ms log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time, - "l2_head", l2Head, "l2_head_time", l2Head.Time, "max_seq_drift", msd) + "l2_head", l2Head, "l2_head_time_ms", l2Head.MillisecondTimestamp(), "max_seq_drift_ms", msd) // If we are past the sequencer depth, we may want to advance the origin, but need to still // check the time of the next origin. 
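// Illustrative sketch, not part of the diff: the sequencer-drift bound is now applied in
// milliseconds; MaxSequencerDrift (seconds) is scaled by 1000 and compared against
// millisecond timestamps, as in the pastSeqDrift expression that follows. Names are
// illustrative only.
func pastSeqDriftMs(nextL2MilliTime, l1OriginMilliTime, maxSeqDriftSec uint64) bool {
	return nextL2MilliTime > l1OriginMilliTime+maxSeqDriftSec*1000
}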
- pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+msd + pastSeqDrift := los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()) > currentOrigin.MillisecondTimestamp()+msd // Limit the time to fetch next origin block by default refCtx, refCancel := context.WithTimeout(ctx, 100*time.Millisecond) defer refCancel() if pastSeqDrift { - log.Warn("Next L2 block time is past the sequencer drift + current origin time") + log.Warn("Next L2 block time is past the sequencer drift + current origin time", + "l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), + "l2_block_ms_interval", los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()), + "l1_origin_ms_timestamp", currentOrigin.MillisecondTimestamp(), + "max_ms_drift", msd) // Must fetch next L1 block as long as it may take, cause we are pastSeqDrift refCtx = ctx } @@ -93,8 +98,17 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc // of slack. For simplicity, we implement our Sequencer to always start building on the latest // L1 block when we can. // If not pastSeqDrift and next origin receipts not cached, fallback to current origin. - if l2Head.Time+los.cfg.BlockTime >= nextOrigin.Time && (pastSeqDrift || receiptsCached) { + if los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()) >= nextOrigin.MillisecondTimestamp() && (pastSeqDrift || receiptsCached) { return nextOrigin, nil + } else { + log.Warn("select l1 old origin, give up next origin", + "current_l2_head_ms_timestamp", l2Head.MillisecondTimestamp(), + "next_l2_head_ms_timestamp", los.cfg.NextMillisecondBlockTime(l2Head.MillisecondTimestamp()), + "current_l1_origin_ms_timestamp", currentOrigin.MillisecondTimestamp(), + "next_l1_origin_ms_timestamp", nextOrigin.MillisecondTimestamp(), + "l2_past_seq_drift", pastSeqDrift, + "max_ms_drift", msd, + "l1_receipts_cached", receiptsCached) } return currentOrigin, nil diff --git a/op-node/rollup/driver/origin_selector_test.go b/op-node/rollup/driver/origin_selector_test.go index fa3a9c12fe..b02f4f44e0 100644 --- a/op-node/rollup/driver/origin_selector_test.go +++ b/op-node/rollup/driver/origin_selector_test.go @@ -25,7 +25,7 @@ func TestOriginSelectorAdvances(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -67,7 +67,7 @@ func TestOriginSelectorRespectsOriginTiming(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -108,7 +108,7 @@ func TestOriginSelectorRespectsConfDepth(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -151,7 +151,7 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -191,7 +191,7 @@ func TestOriginSelector_FjordSeqDrift(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 2000, FjordTime: u64ptr(20), // a's timestamp } l1 := &testutils.MockL1Source{} @@ -225,7 +225,7 @@ func TestOriginSelectorSeqDriftRespectsNextOriginTime(t 
*testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -268,7 +268,7 @@ func TestOriginSelectorHandlesLateL1Blocks(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2, + BlockTime: 2000, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) diff --git a/op-node/rollup/driver/sequencer.go b/op-node/rollup/driver/sequencer.go index 7ad227d6cf..76bd91192a 100644 --- a/op-node/rollup/driver/sequencer.go +++ b/op-node/rollup/driver/sequencer.go @@ -99,7 +99,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { // empty blocks (other than the L1 info deposit and any user deposits). We handle this by // setting NoTxPool to true, which will cause the Sequencer to not include any transactions // from the transaction pool. - attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.spec.MaxSequencerDrift(l1Origin.Time) + attrs.NoTxPool = attrs.MillisecondTimestamp() > l1Origin.MillisecondTimestamp()+d.spec.MaxSequencerDrift(l1Origin.Time)*1000 // For the Ecotone activation block we shouldn't include any sequencer transactions. if d.rollupCfg.IsEcotoneActivationBlock(uint64(attrs.Timestamp)) { @@ -154,7 +154,11 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { if safe { d.log.Warn("delaying sequencing to not interrupt safe-head changes", "onto", buildingOnto, "onto_time", buildingOnto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. - return time.Second * time.Duration(d.rollupCfg.BlockTime) + + if buildingOnto == (eth.L2BlockRef{}) { + return time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval(uint64(time.Now().UnixMilli()))) + } + return time.Millisecond * time.Duration(d.rollupCfg.MillisecondBlockInterval(buildingOnto.MillisecondTimestamp())) } head := d.engine.UnsafeL2Head() @@ -166,8 +170,15 @@ func (d *Sequencer) PlanNextSequencerAction() time.Duration { return delay } - blockTime := time.Duration(d.rollupCfg.BlockTime) * time.Second - payloadTime := time.Unix(int64(head.Time+d.rollupCfg.BlockTime), 0) + var blockInterval uint64 + if buildingOnto == (eth.L2BlockRef{}) { + blockInterval = d.rollupCfg.MillisecondBlockInterval(uint64(time.Now().UnixMilli())) + } else { + blockInterval = d.rollupCfg.MillisecondBlockInterval(buildingOnto.MillisecondTimestamp()) + } + + blockTime := time.Millisecond * time.Duration(blockInterval) + payloadTime := time.UnixMilli(int64(head.MillisecondTimestamp() + blockInterval)) remainingTime := payloadTime.Sub(now) // If we started building a block already, and if that work is still consistent, @@ -222,11 +233,19 @@ func (d *Sequencer) BuildingOnto() eth.L2BlockRef { // If the engine is currently building safe blocks, then that building is not interrupted, and sequencing is delayed. 
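// Illustrative sketch, not part of the diff: sequencing delays are now computed in
// milliseconds from the fork-dependent block interval, with a wall-clock fallback when
// there is no block to build onto, mirroring PlanNextSequencerAction above. The function
// name is illustrative only.
func nextBlockDelay(cfg *rollup.Config, onto eth.L2BlockRef) time.Duration {
	refMs := uint64(time.Now().UnixMilli())
	if onto != (eth.L2BlockRef{}) {
		refMs = onto.MillisecondTimestamp()
	}
	return time.Duration(cfg.MillisecondBlockInterval(refMs)) * time.Millisecond
}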
func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.AsyncGossiper, sequencerConductor conductor.SequencerConductor) (*eth.ExecutionPayloadEnvelope, error) { // if the engine returns a non-empty payload, OR if the async gossiper already has a payload, we can CompleteBuildingBlock - if onto, buildingID, safe := d.engine.BuildingPayload(); buildingID != (eth.PayloadID{}) || agossip.Get() != nil { + onto, buildingID, safe := d.engine.BuildingPayload() + var blockInterval uint64 + if onto == (eth.L2BlockRef{}) { + blockInterval = d.rollupCfg.MillisecondBlockInterval(uint64(time.Now().UnixMilli())) + } else { + blockInterval = d.rollupCfg.MillisecondBlockInterval(onto.MillisecondTimestamp()) + } + + if buildingID != (eth.PayloadID{}) || agossip.Get() != nil { if safe { d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) // approximates the worst-case time it takes to build a block, to reattempt sequencing after. - d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) return nil, nil } envelope, err := d.CompleteBuildingBlock(ctx, agossip, sequencerConductor) @@ -234,9 +253,11 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As if errors.Is(err, derive.ErrCritical) { return nil, err // bubble up critical errors. } else if errors.Is(err, derive.ErrReset) { - d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) + d.log.Error("sequencer failed to seal new block, requiring derivation reset", + "L2 block number", onto.Number, "L1 origin", onto.L1Origin.Number, "timestamp_ms", + onto.MilliTime, "timestamp_second", onto.Time, "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) // hold off from sequencing for a full block d.CancelBuildingBlock(ctx) return nil, err } else if errors.Is(err, derive.ErrTemporary) { @@ -263,9 +284,10 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context, agossip async.As if errors.Is(err, derive.ErrCritical) { return nil, err } else if errors.Is(err, derive.ErrReset) { - d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) + d.log.Error("sequencer failed to seal new block, requiring derivation reset", + "L2 block number", onto.Number, "timestamp_ms", onto.MilliTime, "timestamp_second", onto.Time, "err", err) d.metrics.RecordSequencerReset() - d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.rollupCfg.BlockTime)) // hold off from sequencing for a full block + d.nextAction = d.timeNow().Add(time.Millisecond * time.Duration(blockInterval)) // hold off from sequencing for a full block return nil, err } else if errors.Is(err, derive.ErrTemporary) { d.log.Error("sequencer temporarily failed to start building new block", "err", err) diff --git a/op-node/rollup/driver/sequencer_test.go b/op-node/rollup/driver/sequencer_test.go index c9dd1d3ff7..0b04ffe1c1 100644 --- a/op-node/rollup/driver/sequencer_test.go +++ b/op-node/rollup/driver/sequencer_test.go @@ -184,7 +184,7 @@ func TestSequencerChaosMonkey(t *testing.T) { L2Time: l1Time + 300, // L2 may start with a relative old L1 origin and will have to catch it up SystemConfig: eth.SystemConfig{}, }, - BlockTime: 2, + 
BlockTime: 2000, MaxSequencerDrift: 30, } // keep track of the L1 timestamps we mock because sometimes we only have the L1 hash/num handy @@ -259,7 +259,7 @@ func TestSequencerChaosMonkey(t *testing.T) { testGasLimit := eth.Uint64Quantity(10_000_000) return ð.PayloadAttributes{ - Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.BlockTime), + Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.SecondBlockInterval()), PrevRandao: eth.Bytes32{}, SuggestedFeeRecipient: common.Address{}, Transactions: []eth.Data{infoDep}, @@ -268,7 +268,7 @@ func TestSequencerChaosMonkey(t *testing.T) { }, nil }) - maxL1BlockTimeGap := uint64(100) + maxL1BlockTimeGap := uint64(100) * 1000 // ms // The origin selector just generates random L1 blocks based on RNG var originErr error originSelector := testOriginSelectorFn(func(ctx context.Context, l2Head eth.L2BlockRef) (eth.L1BlockRef, error) { @@ -282,11 +282,11 @@ func TestSequencerChaosMonkey(t *testing.T) { Time: l1Times[l2Head.L1Origin], } // randomly make a L1 origin appear, if we can even select it - nextL2Time := l2Head.Time + cfg.BlockTime - if nextL2Time <= origin.Time { + nextL2MilliTime := l2Head.MillisecondTimestamp() + cfg.BlockTime + if nextL2MilliTime <= origin.MillisecondTimestamp() { return origin, nil } - maxTimeIncrement := nextL2Time - origin.Time + maxTimeIncrement := nextL2MilliTime - origin.MillisecondTimestamp() if maxTimeIncrement > maxL1BlockTimeGap { maxTimeIncrement = maxL1BlockTimeGap } @@ -295,7 +295,7 @@ func TestSequencerChaosMonkey(t *testing.T) { Hash: mockL1Hash(origin.Number + 1), Number: origin.Number + 1, ParentHash: origin.Hash, - Time: origin.Time + 1 + uint64(rng.Int63n(int64(maxTimeIncrement))), + Time: origin.Time + 1 + uint64(rng.Int63n(int64(maxTimeIncrement/1000))), } l1Times[nextOrigin.ID()] = nextOrigin.Time return nextOrigin, nil @@ -375,7 +375,7 @@ func TestSequencerChaosMonkey(t *testing.T) { l2Head := engControl.UnsafeL2Head() t.Logf("avg build time: %s, clock timestamp: %d, L2 head time: %d, L1 origin time: %d, avg txs per block: %f", engControl.avgBuildingTime(), clockFn().Unix(), l2Head.Time, l1Times[l2Head.L1Origin], engControl.avgTxsPerBlock()) require.Equal(t, engControl.totalBuiltBlocks, desiredBlocks, "persist through random errors and build the desired blocks") - require.Equal(t, l2Head.Time, cfg.Genesis.L2Time+uint64(desiredBlocks)*cfg.BlockTime, "reached desired L2 block timestamp") + require.Equal(t, l2Head.MillisecondTimestamp(), cfg.Genesis.L2Time*1000+uint64(desiredBlocks)*cfg.BlockTime, "reached desired L2 block timestamp") require.GreaterOrEqual(t, l2Head.Time, l1Times[l2Head.L1Origin], "the L2 time >= the L1 time") require.Less(t, l2Head.Time-l1Times[l2Head.L1Origin], uint64(100), "The L1 origin time is close to the L2 time") require.Less(t, clockTime.Sub(time.Unix(int64(l2Head.Time), 0)).Abs(), 2*time.Second, "L2 time is accurate, within 2 seconds of wallclock") diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index baf95e6ca8..f1c5bd5f1b 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -269,7 +269,7 @@ func (s *Driver) eventLoop() { // Create a ticker to check if there is a gap in the engine queue. Whenever // there is, we send requests to sync source to retrieve the missing payloads. 
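// Illustrative sketch, not part of the diff: with BlockTime gone from the rollup config,
// fixed-rate timers are derived from the VoltBlockTime constant instead, e.g. the engine
// gap-check ticker set up just below (2 x the 500ms post-Volta interval). The function
// name is illustrative only.
func newSyncCheckTicker() *time.Ticker {
	return time.NewTicker(time.Duration(rollup.VoltBlockTime) * time.Millisecond * 2)
}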
- syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2 + syncCheckInterval := time.Duration(rollup.VoltBlockTime) * time.Millisecond * 2 altSyncTicker := time.NewTicker(syncCheckInterval) defer altSyncTicker.Stop() lastUnsafeL2 := s.engineController.UnsafeL2Head() @@ -438,7 +438,7 @@ func (s *Driver) eventLoop() { continue } else if err != nil && errors.Is(err, derive.ErrReset) { // If the pipeline corrupts, e.g. due to a reorg, simply reset it - s.log.Warn("Derivation pipeline is reset", "err", err) + s.log.Warn("Derivation pipeline is reset", "l1 origin", s.derivation.Origin().Number, "err", err) s.derivation.Reset() s.metrics.RecordPipelineReset() continue diff --git a/op-node/rollup/superchain.go b/op-node/rollup/superchain.go index acb5daa538..3714dce1f4 100644 --- a/op-node/rollup/superchain.go +++ b/op-node/rollup/superchain.go @@ -75,7 +75,7 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) { // but since none of the superchain chains differ, it's not represented in the superchain-registry yet. // This restriction on superchain-chains may change in the future. // Test/Alt configurations can still load custom rollup-configs when necessary. - BlockTime: 2, + //BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 3600, ChannelTimeout: 300, diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 3d3aa20759..4bcd086031 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -35,6 +35,11 @@ var ( ErrL2ChainIDNotPositive = errors.New("L2 chain ID must be non-zero and positive") ) +var ( + VoltBlockTime uint64 = 500 + BeforeVoltBlockTime uint64 = 1000 +) + // NetworkNames are user friendly names to use in the chain spec banner. var NetworkNames = map[string]string{ "56": "bscMainnet", @@ -72,8 +77,10 @@ type PlasmaConfig struct { type Config struct { // Genesis anchor point of the rollup Genesis Genesis `json:"genesis"` - // Seconds per L2 block - BlockTime uint64 `json:"block_time"` + // BlockTime is the interval configuration of L2 block; + // which supports the new millisecond unit and is compatible with the legacy second unit. + // Temp delete, will reset after developing + //BlockTime uint64 `json:"block_time"` // Sequencer batches may not be more than MaxSequencerDrift seconds after // the L1 timestamp of the sequencing window end. // @@ -127,6 +134,10 @@ type Config struct { // Active if SnowTime != nil && L2 block timestamp >= *SnowTime, inactive otherwise. SnowTime *uint64 `json:"snow_time,omitempty"` + // VoltaTime sets the activation time of the VoltaTime network upgrade. + // Active if VoltaTime != nil && L2 block timestamp >= *VoltaTime, inactive otherwise. + VoltaTime *uint64 `json:"volta_time,omitempty"` + // Note: below addresses are part of the block-derivation process, // and required to be the same network-wide to stay in consensus. 
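// Illustrative sketch, not part of the diff: how the new VoltaTime field interacts with the
// VoltBlockTime/BeforeVoltBlockTime constants through the helpers defined just below.
// The fork time value is made up for illustration.
func exampleVoltaIntervals() (beforeMs, afterMs uint64) {
	voltaTime := uint64(1_700_000_000) // illustrative fork activation, seconds since epoch
	cfg := &rollup.Config{VoltaTime: &voltaTime}
	// a millisecond timestamp whose second part is before the fork keeps the legacy 1000ms interval
	beforeMs = cfg.MillisecondBlockInterval((voltaTime - 1) * 1000) // == BeforeVoltBlockTime
	// at or after the fork second the interval drops to 500ms
	afterMs = cfg.MillisecondBlockInterval(voltaTime * 1000) // == VoltBlockTime
	return beforeMs, afterMs
}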
@@ -158,6 +169,66 @@ type Config struct { LegacyUsePlasma bool `json:"use_plasma,omitempty"` } +//const millisecondBlockIntervalVolta = 500 + +func (cfg *Config) MillisecondBlockInterval(millisecondTimestamp uint64) uint64 { + if cfg.IsVolta(millisecondTimestamp / 1000) { + return VoltBlockTime + } + return BeforeVoltBlockTime +} + +func (cfg *Config) SecondBlockInterval(millisecondTimestamp uint64) uint64 { + return cfg.MillisecondBlockInterval(millisecondTimestamp) / 1000 +} + +func (cfg *Config) NextMillisecondBlockTime(millisecondTimestamp uint64) uint64 { + return millisecondTimestamp + cfg.MillisecondBlockInterval(millisecondTimestamp) +} + +func (cfg *Config) NextSecondBlockTime(millisecondTimestamp uint64) uint64 { + return cfg.NextMillisecondBlockTime(millisecondTimestamp) / 1000 +} + +func (c *Config) IsVolta(timestamp uint64) bool { + return c.VoltaTime != nil && timestamp >= *c.VoltaTime +} + +func (c *Config) VoltaBlocNumber() uint64 { + if c.VoltaTime == nil || *c.VoltaTime == 0 { + return 0 + } + return (*c.VoltaTime-c.Genesis.L2Time)/(BeforeVoltBlockTime/1000) + c.Genesis.L2.Number +} + +func (c *Config) IsVoltaActivationBlock(l2BlockMillisecondTime uint64) bool { + if l2BlockMillisecondTime/1000 != 0 { + return false + } + l2BlockTime := l2BlockMillisecondTime / 1000 + return c.IsVolta(l2BlockTime) && + l2BlockTime >= BeforeVoltBlockTime/1000 && + !c.IsVolta(l2BlockTime-BeforeVoltBlockTime/1000) +} + +// MillisecondBlockInterval returns millisecond block interval, which has compatible conversions. +// Mainly used to support milli block interval. +//func (cfg *Config) MillisecondBlockInterval(millisecondTimestamp uint64) uint64 { +// if cfg.BlockTime > 3 { +// return cfg.BlockTime +// } +// return cfg.BlockTime * 1000 +//} + +// SecondBlockInterval returns second block interval, which has compatible conversions. +// Mainly used to compatible to history fork time. +//func (cfg *Config) SecondBlockInterval() uint64 { +// if cfg.BlockTime <= 3 { +// return cfg.BlockTime +// } +// return cfg.BlockTime / 1000 +//} + // ValidateL1Config checks L1 config variables for errors. func (cfg *Config) ValidateL1Config(ctx context.Context, client L1Client) error { // Validate the L1 Client Chain ID @@ -191,22 +262,35 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client, skipL2 return nil } -func (cfg *Config) TimestampForBlock(blockNumber uint64) uint64 { - return cfg.Genesis.L2Time + ((blockNumber - cfg.Genesis.L2.Number) * cfg.BlockTime) +func (cfg *Config) MillisecondTimestampForBlock(blockNumber uint64) uint64 { + voltaBlockNumber := cfg.VoltaBlocNumber() + if voltaBlockNumber == 0 || blockNumber <= voltaBlockNumber { + return cfg.Genesis.L2Time*1000 + ((blockNumber - cfg.Genesis.L2.Number) * BeforeVoltBlockTime) + } else { + return *cfg.VoltaTime*1000 + (blockNumber-voltaBlockNumber)*VoltBlockTime + } } -func (cfg *Config) TargetBlockNumber(timestamp uint64) (num uint64, err error) { - // subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that - // difference by the block time to get the expected L2 block number at the current time. If the - // unsafe head does not have this block number, then there is a gap in the queue. 
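// Illustrative worked example, not part of the diff, applying the formulas above with
// made-up numbers: genesis L2Time = 100s, genesis block number 0, VoltaTime = 120s.
func exampleVoltaBlockMath() {
	const genesisSec, voltaSec uint64 = 100, 120
	voltaBlock := (voltaSec - genesisSec) / (1000 / 1000)   // 20, per VoltaBlocNumber (genesis block 0)
	block10Ms := genesisSec*1000 + 10*1000                  // 110000ms: block 10, pre-Volta 1000ms spacing
	block25Ms := voltaSec*1000 + (25-voltaBlock)*500        // 122500ms: block 25, post-Volta 500ms spacing
	targetNum := voltaBlock + (block25Ms-voltaSec*1000)/500 // TargetBlockNumber(122500) == 25
	_, _, _ = block10Ms, block25Ms, targetNum
}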
- genesisTimestamp := cfg.Genesis.L2Time - if timestamp < genesisTimestamp { - return 0, fmt.Errorf("did not reach genesis time (%d) yet", genesisTimestamp) +func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err error) { + voltaBlockNumber := cfg.VoltaBlocNumber() + if voltaBlockNumber == 0 || milliTimestamp <= *cfg.VoltaTime*1000 { + // subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that + // difference by the block time to get the expected L2 block number at the current time. If the + // unsafe head does not have this block number, then there is a gap in the queue. + genesisMilliTimestamp := cfg.Genesis.L2Time * 1000 + if milliTimestamp < genesisMilliTimestamp { + return 0, fmt.Errorf("did not reach genesis time (%d) yet", genesisMilliTimestamp) + } + wallClockGenesisDiff := milliTimestamp - genesisMilliTimestamp + // Note: round down, we should not request blocks into the future. + blocksSinceGenesis := wallClockGenesisDiff / BeforeVoltBlockTime + return cfg.Genesis.L2.Number + blocksSinceGenesis, nil + } else { + voltaMilliTimestamp := *cfg.VoltaTime * 1000 + wallClockGenesisDiff := milliTimestamp - voltaMilliTimestamp + blocksSinceVolta := wallClockGenesisDiff / VoltBlockTime + return voltaBlockNumber + blocksSinceVolta, nil } - wallClockGenesisDiff := timestamp - genesisTimestamp - // Note: round down, we should not request blocks into the future. - blocksSinceGenesis := wallClockGenesisDiff / cfg.BlockTime - return cfg.Genesis.L2.Number + blocksSinceGenesis, nil } type L1Client interface { @@ -269,9 +353,9 @@ func (cfg *Config) CheckL2GenesisBlockHash(ctx context.Context, client L2Client) // Check verifies that the given configuration makes sense func (cfg *Config) Check() error { - if cfg.BlockTime == 0 { - return ErrBlockTimeZero - } + //if cfg.BlockTime == 0 { + // return ErrBlockTimeZero + //} if cfg.ChannelTimeout == 0 { return ErrMissingChannelTimeout } @@ -336,6 +420,9 @@ func (cfg *Config) Check() error { if err := checkFork(cfg.EcotoneTime, cfg.FjordTime, Ecotone, Fjord); err != nil { return err } + if err := checkFork(cfg.FjordTime, cfg.VoltaTime, Fjord, Volta); err != nil { + return err + } return nil } @@ -427,8 +514,8 @@ func (c *Config) IsFjord(timestamp uint64) bool { // Fjord upgrade. func (c *Config) IsFjordActivationBlock(l2BlockTime uint64) bool { return c.IsFjord(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsFjord(l2BlockTime-c.BlockTime) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsFjord(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } // IsInterop returns true if the Interop hardfork is active at or past the given timestamp. 
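// Illustrative sketch, not part of the diff: the activation-block checks in this file all
// follow the same shape, now parameterised by the fork-dependent block interval (in
// seconds) instead of the removed cfg.BlockTime. isActive stands in for any of the
// IsRegolith/IsCanyon/IsDelta/IsEcotone/IsFjord predicates; names are illustrative.
func isActivationBlock(isActive func(uint64) bool, blockIntervalSec, l2BlockTime uint64) bool {
	return isActive(l2BlockTime) &&
		l2BlockTime >= blockIntervalSec &&
		!isActive(l2BlockTime-blockIntervalSec)
}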
@@ -438,34 +525,34 @@ func (c *Config) IsInterop(timestamp uint64) bool { func (c *Config) IsRegolithActivationBlock(l2BlockTime uint64) bool { return c.IsRegolith(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsRegolith(l2BlockTime-c.BlockTime) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsRegolith(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } func (c *Config) IsCanyonActivationBlock(l2BlockTime uint64) bool { return c.IsCanyon(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsCanyon(l2BlockTime-c.BlockTime) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsCanyon(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } func (c *Config) IsDeltaActivationBlock(l2BlockTime uint64) bool { return c.IsDelta(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsDelta(l2BlockTime-c.BlockTime) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsDelta(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } // IsEcotoneActivationBlock returns whether the specified block is the first block subject to the // Ecotone upgrade. Ecotone activation at genesis does not count. func (c *Config) IsEcotoneActivationBlock(l2BlockTime uint64) bool { return c.IsEcotone(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsEcotone(l2BlockTime-c.BlockTime) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsEcotone(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } func (c *Config) IsInteropActivationBlock(l2BlockTime uint64) bool { return c.IsInterop(l2BlockTime) && - l2BlockTime >= c.BlockTime && - !c.IsInterop(l2BlockTime-c.BlockTime) + l2BlockTime >= c.SecondBlockInterval(l2BlockTime*1000) && + !c.IsInterop(l2BlockTime-c.SecondBlockInterval(l2BlockTime*1000)) } // ForkchoiceUpdatedVersion returns the EngineAPIMethod suitable for the chain hard fork version. diff --git a/op-node/rollup/types_test.go b/op-node/rollup/types_test.go index 248fe3eaad..d427104a58 100644 --- a/op-node/rollup/types_test.go +++ b/op-node/rollup/types_test.go @@ -534,7 +534,7 @@ func TestTimestampForBlock(t *testing.T) { name: "FirstBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2, + blockTime: 2000, blockNum: 0, expectedBlockTime: 100, }, @@ -542,7 +542,7 @@ func TestTimestampForBlock(t *testing.T) { name: "SecondBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2, + blockTime: 2000, blockNum: 1, expectedBlockTime: 102, }, @@ -550,7 +550,7 @@ func TestTimestampForBlock(t *testing.T) { name: "NBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2, + blockTime: 2000, blockNum: 25, expectedBlockTime: 150, }, diff --git a/op-node/service.go b/op-node/service.go index 134a916ee0..0422ef42ab 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -27,6 +27,12 @@ import ( opflags "github.com/ethereum-optimism/optimism/op-service/flags" ) +const ( + minSecondBlockInterval = 1 + maxSecondBlockInterval = 3 + maxMillisecondBlockInterval = 750 +) + // NewConfig creates a Config from the provided flags or environment variables. func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { if err := flags.CheckRequired(ctx); err != nil { @@ -43,6 +49,17 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { rollupConfig.ProtocolVersionsAddress = common.Address{} } + //{ + // if rollupConfig.BlockTime >= minSecondBlockInterval && rollupConfig.BlockTime <= maxSecondBlockInterval { + // // Convert legacy second-level timestamp to millisecond timestamp, + // // This is a compatibility behavior. 
+ // rollupConfig.BlockTime = rollupConfig.BlockTime * 1000 + // } else if rollupConfig.BlockTime%50 != 0 && rollupConfig.BlockTime > maxMillisecondBlockInterval { + // return nil, fmt.Errorf("block time is invalid, block_time: %v", rollupConfig.BlockTime) + // } + // // rollupConfig.BlockTime is millisecond block interval + //} + configPersistence := NewConfigPersistence(ctx) driverConfig := NewDriverConfig(ctx) diff --git a/op-program/client/l2/engineapi/block_processor.go b/op-program/client/l2/engineapi/block_processor.go index d81212db20..b8850f86e2 100644 --- a/op-program/client/l2/engineapi/block_processor.go +++ b/op-program/client/l2/engineapi/block_processor.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -70,6 +71,7 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* } parentHeader := provider.GetHeaderByHash(header.ParentHash) if header.Time <= parentHeader.Time { + log.Error("invalid timestamp", "header", header, "parent_header", parentHeader) return nil, errors.New("invalid timestamp") } statedb, err := provider.StateAt(parentHeader.Root) diff --git a/op-service/eth/block_info.go b/op-service/eth/block_info.go index 268c6d934b..24b28611ae 100644 --- a/op-service/eth/block_info.go +++ b/op-service/eth/block_info.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" + "github.com/holiman/uint256" ) type BlockInfo interface { @@ -16,6 +17,7 @@ type BlockInfo interface { Root() common.Hash // state-root NumberU64() uint64 Time() uint64 + MillisecondTimestamp() uint64 // MixDigest field, reused for randomness after The Merge (Bellatrix hardfork) MixDigest() common.Hash BaseFee() *big.Int @@ -33,11 +35,16 @@ type BlockInfo interface { } func InfoToL1BlockRef(info BlockInfo) L1BlockRef { + milliPart := uint64(0) + if info.MixDigest() != (common.Hash{}) { + milliPart = uint256.NewInt(0).SetBytes32(info.MixDigest().Bytes()).Uint64() + } return L1BlockRef{ Hash: info.Hash(), Number: info.NumberU64(), ParentHash: info.ParentHash(), Time: info.Time(), + MilliTime: milliPart, } } @@ -72,6 +79,14 @@ func (b blockInfo) ParentBeaconRoot() *common.Hash { return b.Block.BeaconRoot() } +func (b blockInfo) MillisecondTimestamp() uint64 { + milliPart := uint64(0) + if b.MixDigest() != (common.Hash{}) { + milliPart = uint256.NewInt(0).SetBytes32(b.MixDigest().Bytes()).Uint64() + } + return b.Block.Time()*1000 + milliPart +} + func BlockToInfo(b *types.Block) BlockInfo { return blockInfo{b} } @@ -102,6 +117,15 @@ func (h headerBlockInfo) Time() uint64 { return h.Header.Time } +func (h headerBlockInfo) MillisecondTimestamp() uint64 { + milliPart := uint64(0) + if h.MixDigest() != (common.Hash{}) { + milliPart = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() + } + + return h.Header.Time*1000 + milliPart +} + func (h headerBlockInfo) MixDigest() common.Hash { return h.Header.MixDigest } diff --git a/op-service/eth/heads.go b/op-service/eth/heads.go index db837cbbed..f241dc4af8 100644 --- a/op-service/eth/heads.go +++ b/op-service/eth/heads.go @@ -5,9 +5,11 @@ import ( "time" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" 
"github.com/ethereum/go-ethereum/log" + "github.com/holiman/uint256" ) // HeadSignalFn is used as callback function to accept head-signals @@ -43,11 +45,16 @@ func WatchHeadChanges(ctx context.Context, src NewHeadSource, fn HeadSignalFn) ( for { select { case header := <-headChanges: + mTime := uint64(0) + if header.MixDigest != (common.Hash{}) { + mTime = uint256.NewInt(0).SetBytes32(header.MixDigest[:]).Uint64() + } fn(eventsCtx, L1BlockRef{ Hash: header.Hash(), Number: header.Number.Uint64(), ParentHash: header.ParentHash, Time: header.Time, + MilliTime: mTime, }) case <-eventsCtx.Done(): return nil diff --git a/op-service/eth/id.go b/op-service/eth/id.go index 7beeabfe32..14b356a3e8 100644 --- a/op-service/eth/id.go +++ b/op-service/eth/id.go @@ -34,13 +34,18 @@ type L2BlockRef struct { Hash common.Hash `json:"hash"` Number uint64 `json:"number"` ParentHash common.Hash `json:"parentHash"` - Time uint64 `json:"timestamp"` + Time uint64 `json:"timestamp"` // second timestamp + MilliTime uint64 `json:"millitimestamp"` // support millisecond L1Origin BlockID `json:"l1origin"` SequenceNumber uint64 `json:"sequenceNumber"` // distance to first block of epoch } +func (id L2BlockRef) MillisecondTimestamp() uint64 { + return id.Time*1000 + id.MilliTime +} + func (id L2BlockRef) String() string { - return fmt.Sprintf("%s:%d", id.Hash.String(), id.Number) + return fmt.Sprintf("%s:%d:%d", id.Hash.String(), id.Number, id.MillisecondTimestamp()) } // TerminalString implements log.TerminalStringer, formatting a string for console @@ -53,11 +58,16 @@ type L1BlockRef struct { Hash common.Hash `json:"hash"` Number uint64 `json:"number"` ParentHash common.Hash `json:"parentHash"` - Time uint64 `json:"timestamp"` + Time uint64 `json:"timestamp"` // second timestamp + MilliTime uint64 `json:"millisecondTimestamp"` // support millisecond +} + +func (id L1BlockRef) MillisecondTimestamp() uint64 { + return id.Time*1000 + id.MilliTime } func (id L1BlockRef) String() string { - return fmt.Sprintf("%s:%d", id.Hash.String(), id.Number) + return fmt.Sprintf("%s:%d:%d", id.Hash.String(), id.Number, id.MillisecondTimestamp()) } // TerminalString implements log.TerminalStringer, formatting a string for console diff --git a/op-service/eth/types.go b/op-service/eth/types.go index 290cffd3e7..e8df2c205b 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -196,6 +196,11 @@ type ExecutionPayload struct { ExcessBlobGas *Uint64Quantity `json:"excessBlobGas,omitempty"` } +func (payload *ExecutionPayload) MillisecondTimestamp() uint64 { + milliPart := uint64(payload.PrevRandao[0])*256 + uint64(payload.PrevRandao[1]) + return uint64(payload.Timestamp)*1000 + milliPart +} + func (payload *ExecutionPayload) ID() BlockID { return BlockID{Hash: payload.BlockHash, Number: uint64(payload.BlockNumber)} } @@ -328,6 +333,27 @@ type PayloadAttributes struct { GasLimit *Uint64Quantity `json:"gasLimit,omitempty"` } +func (pa *PayloadAttributes) MillisecondTimestamp() uint64 { + milliPart := uint64(pa.PrevRandao[0])*256 + uint64(pa.PrevRandao[1]) + return uint64(pa.Timestamp)*1000 + milliPart +} + +// SetMillisecondTimestamp is used to set millisecond timestamp. +// [32]byte PrevRandao +// [0][1] represent l2 millisecond's mill part. 
+func (pa *PayloadAttributes) SetMillisecondTimestamp(ts uint64, updateMilliSecond bool) { + pa.Timestamp = hexutil.Uint64(ts / 1000) + if updateMilliSecond { + milliPartBytes := uint256.NewInt(ts % 1000).Bytes32() + pa.PrevRandao[0] = milliPartBytes[30] + pa.PrevRandao[1] = milliPartBytes[31] + + // It is just a marker byte to ensure that the whole is not empty; + // op-geth relies on non-empty to determine that the passed in millisecond timestamp. + pa.PrevRandao[2] = 1 + } +} + type ExecutePayloadStatus string const ( diff --git a/op-service/sources/l2_client.go b/op-service/sources/l2_client.go index 812ddcab7c..7a22f81954 100644 --- a/op-service/sources/l2_client.go +++ b/op-service/sources/l2_client.go @@ -32,9 +32,9 @@ func L2ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L2ClientConfig span := int(config.SeqWindowSize) * 3 / 2 // Estimate number of L2 blocks in this span of L1 blocks // (there's always one L2 block per L1 block, L1 is thus the minimum, even if block time is very high) - if config.BlockTime < 12 && config.BlockTime > 0 { + if config.SecondBlockInterval(0) < 12 && config.SecondBlockInterval(0) > 0 { span *= 12 - span /= int(config.BlockTime) + span /= int(config.SecondBlockInterval(0)) } fullSpan := span if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large diff --git a/op-service/sources/types.go b/op-service/sources/types.go index afb994bd7c..5329ab27c1 100644 --- a/op-service/sources/types.go +++ b/op-service/sources/types.go @@ -63,6 +63,14 @@ func (h headerInfo) Time() uint64 { return h.Header.Time } +func (h headerInfo) MillisecondTimestamp() uint64 { + milliPart := uint64(0) + if h.MixDigest() != (common.Hash{}) { + milliPart = uint256.NewInt(0).SetBytes32(h.MixDigest().Bytes()).Uint64() + } + return h.Header.Time*1000 + milliPart +} + func (h headerInfo) MixDigest() common.Hash { return h.Header.MixDigest } diff --git a/op-service/testutils/l1info.go b/op-service/testutils/l1info.go index 8f04b71fed..b7c86e9a3d 100644 --- a/op-service/testutils/l1info.go +++ b/op-service/testutils/l1info.go @@ -21,6 +21,7 @@ type MockBlockInfo struct { InfoRoot common.Hash InfoNum uint64 InfoTime uint64 + InfoMilliTime uint64 InfoMixDigest [32]byte InfoBaseFee *big.Int InfoBlobBaseFee *big.Int @@ -56,6 +57,10 @@ func (l *MockBlockInfo) Time() uint64 { return l.InfoTime } +func (l *MockBlockInfo) MillisecondTimestamp() uint64 { + return l.InfoTime*1000 + l.InfoMilliTime +} + func (l *MockBlockInfo) MixDigest() common.Hash { return l.InfoMixDigest } diff --git a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol index fe2c7dd7c8..748901f196 100644 --- a/packages/contracts-bedrock/src/L1/L2OutputOracle.sol +++ b/packages/contracts-bedrock/src/L1/L2OutputOracle.sol @@ -25,7 +25,7 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public submissionInterval; - /// @notice The time between L2 blocks in seconds. Once set, this value MUST NOT be modified. + /// @notice The time between L2 blocks in seconds before Volta Hardfork. Once set, this value MUST NOT be modified. /// @custom:network-specific uint256 public l2BlockTime; @@ -41,6 +41,13 @@ contract L2OutputOracle is Initializable, ISemver { /// @custom:network-specific uint256 public finalizationPeriodSeconds; + /// @notice The time between L2 blocks in milliseconds after Volta Hardfork. 
+ uint256 public constant l2MillisecondsBlockTime = 500; + + // TODO: compute accurate hardfork block number + /// @notice The L2 block number of Volta Hardfork. + uint256 public constant voltaBlockNumber = 0; + /// @notice Emitted when an output is proposed. /// @param outputRoot The output root. /// @param l2OutputIndex The index of the output in the l2Outputs array. @@ -201,10 +208,7 @@ contract L2OutputOracle is Initializable, ISemver { "L2OutputOracle: block number must be equal to next expected block number" ); - require( - computeL2Timestamp(_l2BlockNumber) < block.timestamp, - "L2OutputOracle: cannot propose L2 output in the future" - ); + require(isL2TimestampValid(_l2BlockNumber), "L2OutputOracle: cannot propose L2 output in the future"); require(_outputRoot != bytes32(0), "L2OutputOracle: L2 output proposal cannot be the zero hash"); @@ -307,10 +311,34 @@ contract L2OutputOracle is Initializable, ISemver { return latestBlockNumber() + submissionInterval; } + /// @notice Checks the given l2 block number is valid. + /// @param _l2BlockNumber The L2 block number of the target block. + /// @return True that can submit output root, otherwise false. + function isL2TimestampValid(uint256 _l2BlockNumber) public view returns (bool) { + uint256 l2Timestamp = _l2BlockNumber <= voltaBlockNumber + ? computeL2Timestamp(_l2BlockNumber) + : computeL2TimestampAfterVolta(_l2BlockNumber); + + uint256 currentTimestamp = _l2BlockNumber <= voltaBlockNumber ? block.timestamp : block.timestamp * 1000; + + return l2Timestamp < currentTimestamp; + } + /// @notice Returns the L2 timestamp corresponding to a given L2 block number. /// @param _l2BlockNumber The L2 block number of the target block. - /// @return L2 timestamp of the given block. + /// @return L2 timestamp of the given block in seconds. function computeL2Timestamp(uint256 _l2BlockNumber) public view returns (uint256) { return startingTimestamp + ((_l2BlockNumber - startingBlockNumber) * l2BlockTime); } + + /// @notice Returns the L2 timestamp corresponding to a given L2 block number after Volta Hardfork. + /// @param _l2BlockNumber The L2 block number of the target block. + /// @return L2 timestamp of the given block in milliseconds. 
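// A worked example of the post-Volta output-timestamp formula implemented in
// computeL2TimestampAfterVolta below, written as a Go sketch. The concrete values
// (startingTimestamp, startingBlockNumber, voltaBlockNumber, the 2s/500ms intervals) are
// illustrative and not the deployed configuration.
package main

import "fmt"

func computeL2TimestampAfterVolta(startingTimestamp, startingBlockNumber, voltaBlockNumber, l2BlockTime, l2BlockNumber uint64) uint64 {
	beforeVoltaMs := (voltaBlockNumber - startingBlockNumber) * l2BlockTime * 1000 // pre-Volta span, seconds -> ms
	afterVoltaMs := (l2BlockNumber - voltaBlockNumber) * 500                       // post-Volta span at 500ms per block
	return startingTimestamp*1000 + beforeVoltaMs + afterVoltaMs
}

func main() {
	// Start at t=1000s, block 100; Volta at block 200 with a 2s pre-Volta interval.
	// Block 210 lands 200s + 10*0.5s after the start: 1,205,000 ms.
	fmt.Println(computeL2TimestampAfterVolta(1000, 100, 200, 2, 210))
	// Note: the subtraction voltaBlockNumber-startingBlockNumber assumes
	// voltaBlockNumber >= startingBlockNumber; the placeholder constant of 0 in the
	// contract above only satisfies this when startingBlockNumber is also 0.
}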
+ function computeL2TimestampAfterVolta(uint256 _l2BlockNumber) public view returns (uint256) { + uint256 beforeVoltaBlockTime = (voltaBlockNumber - startingBlockNumber) * l2BlockTime * 1000; + uint256 afterVoltaBlockTime = (_l2BlockNumber - voltaBlockNumber) * l2MillisecondsBlockTime; + uint256 totalPassedBlockTime = beforeVoltaBlockTime + afterVoltaBlockTime; + + return (startingTimestamp * 1000) + totalPassedBlockTime; + } } diff --git a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol index 3d89809e9b..821041f004 100644 --- a/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol +++ b/packages/contracts-bedrock/test/L1/L2OutputOracle.t.sol @@ -197,6 +197,35 @@ contract L2OutputOracle_getter_Test is CommonTest { l2OutputOracle.computeL2Timestamp(startingBlockNumber + 96024), startingTimestamp + l2BlockTime * 96024 ); } + + function test_isL2TimestampValid_before_hardfork_succeeds() external { + uint256 startingBlockNumber = deploy.cfg().l2OutputOracleStartingBlockNumber(); + assertEq(startingBlockNumber, 1); + uint256 startingTimestamp = deploy.cfg().l2OutputOracleStartingTimestamp(); + assertEq(startingTimestamp, 1); + uint256 l2BlockTime = deploy.cfg().l2BlockTime(); + assertEq(l2BlockTime, 2); + + vm.warp(138901 * 2 + 1); + l2OutputOracle.isL2TimestampValid(138901); + } + + function test_isL2TimestampValid_after_hardfork_succeeds() external { + uint256 startingBlockNumber = deploy.cfg().l2OutputOracleStartingBlockNumber(); + uint256 startingTimestamp = deploy.cfg().l2OutputOracleStartingTimestamp(); + uint256 l2BlockTime = deploy.cfg().l2BlockTime(); + + vm.roll(140100); + l2OutputOracle.isL2TimestampValid(140000); + } + + function test_computeL2TimestampAfterVolta_succeeds() external { + uint256 startingBlockNumber = deploy.cfg().l2OutputOracleStartingBlockNumber(); + uint256 startingTimestamp = deploy.cfg().l2OutputOracleStartingTimestamp(); + uint256 l2BlockTime = deploy.cfg().l2BlockTime(); + + l2OutputOracle.computeL2TimestampAfterVolta(140000); + } } contract L2OutputOracle_proposeL2Output_Test is CommonTest {