diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 81d0d7843..656a0eae6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -85,7 +85,6 @@ jobs: op-node-test: runs-on: ubuntu-latest - needs: op-node-lint steps: - name: Check out code diff --git a/op-batcher/batcher/channel_builder_test.go b/op-batcher/batcher/channel_builder_test.go index 8d1cbd821..90a6b682c 100644 --- a/op-batcher/batcher/channel_builder_test.go +++ b/op-batcher/batcher/channel_builder_test.go @@ -782,7 +782,7 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) { require.NoError(err) err = spanBatch.AppendSingularBatch(singularBatch, l1Info.SequenceNumber) require.NoError(err) - rawSpanBatch, err := spanBatch.ToRawSpanBatch() + rawSpanBatch, err := spanBatch.ToRawSpanBatch(&defaultTestRollupConfig) require.NoError(err) batch := derive.NewBatchData(rawSpanBatch) var buf bytes.Buffer diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 0242c7587..2aefa799b 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -720,7 +720,7 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas GasLimit: uint64(d.L2GenesisBlockGasLimit), }, }, - //BlockTime: d.L2BlockTime, + BlockTime: d.L2BlockTime, MaxSequencerDrift: d.MaxSequencerDrift, SeqWindowSize: d.SequencerWindowSize, ChannelTimeout: d.ChannelTimeout, diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index ad76693ea..e93da9006 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-batcher/compressor" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/stretchr/testify/require" ) @@ -171,13 +172,15 @@ func BenchmarkFinalBatchChannelOut(b *testing.B) { cout, _ := 
channelOutByType(b, tc.BatchType, tc.cd) // add all but the final batch to the channel out for i := 0; i < tc.BatchCount-1; i++ { - err := cout.AddSingularBatch(batches[i], 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, batches[i], 0) require.NoError(b, err) } // measure the time to add the final batch b.StartTimer() // add the final batch to the channel out - err := cout.AddSingularBatch(batches[tc.BatchCount-1], 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, batches[tc.BatchCount-1], 0) require.NoError(b, err) } }) @@ -233,7 +236,8 @@ func BenchmarkIncremental(b *testing.B) { } b.StartTimer() for i := 0; i < tc.BatchCount; i++ { - err := cout.AddSingularBatch(batches[i], 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, batches[i], 0) if err != nil { done = true return @@ -297,7 +301,8 @@ func BenchmarkAllBatchesChannelOut(b *testing.B) { b.StartTimer() // add all batches to the channel out for i := 0; i < tc.BatchCount; i++ { - err := cout.AddSingularBatch(batches[i], 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, batches[i], 0) require.NoError(b, err) } } @@ -347,7 +352,8 @@ func BenchmarkGetRawSpanBatch(b *testing.B) { require.NoError(b, err) } b.StartTimer() - _, err := spanBatch.ToRawSpanBatch() + var cfg rollup.Config + _, err := spanBatch.ToRawSpanBatch(&cfg) require.NoError(b, err) } }) diff --git a/op-node/chaincfg/chains.go b/op-node/chaincfg/chains.go index 7eebf2c30..d7256bad9 100644 --- a/op-node/chaincfg/chains.go +++ b/op-node/chaincfg/chains.go @@ -127,7 +127,7 @@ var OPBNBMainnet = rollup.Config{ GasLimit: 100000000, }, }, - //BlockTime: 1, + BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, @@ -163,7 +163,7 @@ var OPBNBTestnet = rollup.Config{ GasLimit: 100000000, }, }, - //BlockTime: 1, + BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, @@ -199,7 +199,7 @@ var OPBNBQANet = rollup.Config{ GasLimit: 100000000, }, }, - 
//BlockTime: 1, + BlockTime: 1, MaxSequencerDrift: 600, SeqWindowSize: 14400, ChannelTimeout: 1200, diff --git a/op-node/cmd/batch_decoder/reassemble/reassemble.go b/op-node/cmd/batch_decoder/reassemble/reassemble.go index dc9526265..799b76ed8 100644 --- a/op-node/cmd/batch_decoder/reassemble/reassemble.go +++ b/op-node/cmd/batch_decoder/reassemble/reassemble.go @@ -135,7 +135,7 @@ func processFrames(cfg Config, rollupCfg *rollup.Config, id derive.ChannelID, fr // singularBatch will be nil when errored batches = append(batches, singularBatch) case derive.SpanBatchType: - spanBatch, err := derive.DeriveSpanBatch(batchData, cfg.L2BlockTime, cfg.L2GenesisTime, cfg.L2ChainID) + spanBatch, err := derive.DeriveSpanBatch(batchData, rollupCfg, cfg.L2GenesisTime, cfg.L2ChainID) if err != nil { invalidBatches = true fmt.Printf("Error deriving spanBatch from batchData for channel %v. Err: %v\n", id.String(), err) diff --git a/op-node/p2p/app_params.go b/op-node/p2p/app_params.go index 695b00674..e8bea8472 100644 --- a/op-node/p2p/app_params.go +++ b/op-node/p2p/app_params.go @@ -24,7 +24,7 @@ type ApplicationScoreParams struct { } func LightApplicationScoreParams(cfg *rollup.Config) ApplicationScoreParams { - slot := time.Duration(rollup.VoltBlockTime) * time.Millisecond + slot := time.Duration(cfg.BlockTime) * time.Second if slot == 0 { slot = 2 * time.Second } diff --git a/op-node/p2p/peer_params.go b/op-node/p2p/peer_params.go index 6c76025b2..6a60e2160 100644 --- a/op-node/p2p/peer_params.go +++ b/op-node/p2p/peer_params.go @@ -33,7 +33,7 @@ func ScoreDecay(duration time.Duration, slot time.Duration) float64 { // // [PeerScoreParams]: https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub@v0.8.1#PeerScoreParams func LightPeerScoreParams(cfg *rollup.Config) pubsub.PeerScoreParams { - slot := time.Duration(rollup.VoltBlockTime) * time.Millisecond + slot := time.Duration(cfg.BlockTime) * time.Second if slot == 0 { slot = 2 * time.Second } diff --git 
a/op-node/rollup/chain_spec_test.go b/op-node/rollup/chain_spec_test.go index b006bdf70..b6547835c 100644 --- a/op-node/rollup/chain_spec_test.go +++ b/op-node/rollup/chain_spec_test.go @@ -33,7 +33,7 @@ var testConfig = Config{ GasLimit: 30_000_000, }, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 3600, ChannelTimeout: 300, diff --git a/op-node/rollup/derive/batch_queue_test.go b/op-node/rollup/derive/batch_queue_test.go index d0e5af675..4352f29ff 100644 --- a/op-node/rollup/derive/batch_queue_test.go +++ b/op-node/rollup/derive/batch_queue_test.go @@ -55,7 +55,7 @@ func b(chainId *big.Int, timestamp uint64, epoch eth.L1BlockRef) *SingularBatch txData, _ := tx.MarshalBinary() return &SingularBatch{ ParentHash: mockHash(timestamp-2, 2), - Timestamp: timestamp * 1000, + Timestamp: timestamp * 1000, // after derive, this is millisecond timestamp, mock ut EpochNum: rollup.Epoch(epoch.Number), EpochHash: epoch.Hash, Transactions: []hexutil.Bytes{txData}, @@ -115,7 +115,7 @@ func singularBatchToBlockRef(t *testing.T, batch *SingularBatch, blockNumber uin Hash: mockHash(batch.Timestamp/1000, 2), Number: blockNumber, ParentHash: batch.ParentHash, - Time: batch.Timestamp, + Time: batch.Timestamp / 1000, // second timestamp L1Origin: eth.BlockID{Hash: batch.EpochHash, Number: uint64(batch.EpochNum)}, } } @@ -242,7 +242,7 @@ func BatchQueueEager(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -295,7 +295,7 @@ func BatchQueueEager(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.SecondBlockInterval() + safeHead.Time += cfg.BlockTime safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -320,7 +320,7 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 
10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 2, DeltaTime: getDeltaTime(batchType), @@ -396,11 +396,11 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) require.NotNil(t, b) - require.Equal(t, (safeHead.Time+2)*1000, b.Timestamp) + require.Equal(t, safeHead.Time+2, b.Timestamp/1000) require.Equal(t, rollup.Epoch(1), b.EpochNum) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() b, _, e = bq.NextBatch(context.Background(), safeHead) require.ErrorIs(t, e, io.EOF) @@ -412,10 +412,10 @@ func BatchQueueInvalidInternalAdvance(t *testing.T, batchType int) { require.Nil(t, e) require.NotNil(t, b) require.Equal(t, rollup.Epoch(2), b.EpochNum) - require.Equal(t, (safeHead.Time+2)*1000, b.Timestamp) + require.Equal(t, safeHead.Time+2, b.Timestamp/1000) safeHead.Number += 1 safeHead.Time += 2 - safeHead.Hash = mockHash(b.Timestamp, 2) + safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() b, _, e = bq.NextBatch(context.Background(), safeHead) require.ErrorIs(t, e, io.EOF) @@ -440,7 +440,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 2, DeltaTime: getDeltaTime(batchType), @@ -498,7 +498,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { // Check for a generated batch at t = 12 b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(12000)) + require.Equal(t, b.Timestamp/1000, uint64(12)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 @@ -508,7 +508,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { // Check for generated batch at t = 14 b, _, e = 
bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(14000)) + require.Equal(t, b.Timestamp/1000, uint64(14)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(0), b.EpochNum) safeHead.Number += 1 @@ -534,7 +534,7 @@ func BatchQueueMissing(t *testing.T, batchType int) { require.Equal(t, e, io.EOF) b, _, e = bq.NextBatch(context.Background(), safeHead) require.Nil(t, e) - require.Equal(t, b.Timestamp, uint64(18000)) + require.Equal(t, b.Timestamp/1000, uint64(18)) require.Empty(t, b.Transactions) require.Equal(t, rollup.Epoch(1), b.EpochNum) } @@ -557,7 +557,7 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -620,7 +620,7 @@ func BatchQueueAdvancedEpoch(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.SecondBlockInterval() + safeHead.Time += cfg.BlockTime safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -644,7 +644,7 @@ func BatchQueueShuffle(t *testing.T, batchType int) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(batchType), @@ -719,7 +719,7 @@ func BatchQueueShuffle(t *testing.T, batchType int) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.SecondBlockInterval() + safeHead.Time += cfg.BlockTime safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -742,7 +742,7 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -822,7 +822,7 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) { } else { 
require.Equal(t, expectedOutputBatches[i], b) safeHead.Number += 1 - safeHead.Time += cfg.SecondBlockInterval() + safeHead.Time += cfg.BlockTime safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -847,7 +847,7 @@ func TestBatchQueueComplex(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -940,7 +940,7 @@ func TestBatchQueueComplex(t *testing.T) { } else { require.Equal(t, expectedOutput, b) safeHead.Number += 1 - safeHead.Time += cfg.SecondBlockInterval() + safeHead.Time += cfg.BlockTime safeHead.Hash = mockHash(b.Timestamp/1000, 2) safeHead.L1Origin = b.Epoch() } @@ -965,7 +965,7 @@ func TestBatchQueueResetSpan(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 30, DeltaTime: getDeltaTime(SpanBatchType), @@ -998,7 +998,7 @@ func TestBatchQueueResetSpan(t *testing.T) { // This NextBatch() will return the second singular batch. 
safeHead.Number += 1 - safeHead.Time += cfg.SecondBlockInterval() + safeHead.Time += cfg.BlockTime safeHead.Hash = mockHash(nextBatch.Timestamp/1000, 2) safeHead.L1Origin = nextBatch.Epoch() nextBatch, _, err = bq.NextBatch(context.Background(), safeHead) diff --git a/op-node/rollup/derive/batch_test.go b/op-node/rollup/derive/batch_test.go index b7b63ea44..47e9c36b7 100644 --- a/op-node/rollup/derive/batch_test.go +++ b/op-node/rollup/derive/batch_test.go @@ -78,7 +78,7 @@ func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch { func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []*SingularBatch { blockCount := 2 + rng.Intn(128) - l2BlockTime := uint64(2) * 1000 // ms + l2BlockTime := uint64(2) var singularBatches []*SingularBatch for i := 0; i < blockCount; i++ { @@ -87,7 +87,7 @@ func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []* } l1BlockNum := rng.Uint64() // make sure oldest timestamp is large enough - singularBatches[0].Timestamp += 256 * 1000 // ms + singularBatches[0].Timestamp += 256 for i := 0; i < blockCount; i++ { originChangedBit := rng.Intn(2) if originChangedBit == 1 { @@ -160,7 +160,9 @@ func TestBatchRoundTrip(t *testing.T) { err = dec.UnmarshalBinary(enc) require.NoError(t, err) if dec.GetBatchType() == SpanBatchType { - _, err := DeriveSpanBatch(&dec, blockTime, genesisTimestamp, chainID) + var cfg rollup.Config + cfg.BlockTime = blockTime + _, err := DeriveSpanBatch(&dec, &cfg, genesisTimestamp, chainID) require.NoError(t, err) } require.Equal(t, batch, &dec, "Batch not equal test case %v", i) @@ -208,7 +210,9 @@ func TestBatchRoundTripRLP(t *testing.T) { err = dec.DecodeRLP(s) require.NoError(t, err) if dec.GetBatchType() == SpanBatchType { - _, err = DeriveSpanBatch(&dec, blockTime, genesisTimestamp, chainID) + var cfg rollup.Config + cfg.BlockTime = blockTime + _, err = DeriveSpanBatch(&dec, &cfg, genesisTimestamp, chainID) require.NoError(t, err) } require.Equal(t, 
batch, &dec, "Batch not equal test case %v", i) diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index 70eaccf53..a8a795d9f 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -215,18 +215,18 @@ func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1B log.Warn("batch has misaligned timestamp, block time is too short") return BatchDrop } - if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%rollup.VoltBlockTime != 0 { + if (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())%rollup.MillisecondBlockIntervalVolta != 0 { log.Warn("batch has misaligned timestamp, not overlapped exactly") return BatchDrop } - //parentNum = l2SafeHead.Number - (l2SafeHead.MillisecondTimestamp()-batch.GetTimestamp())/cfg.MillisecondBlockInterval() - 1 - var err error - parentNum, err = cfg.TargetBlockNumber(batch.GetTimestamp()) + // map the batch's millisecond timestamp to its block number; the parent is one block below + currentNum, err := cfg.TargetBlockNumber(batch.GetTimestamp()) if err != nil { - log.Warn("failed to computer batch parent number", "batch_ms_time", batch.GetTimestamp(), "err", err) + log.Warn("failed to compute batch number", "batch_ms_time", batch.GetTimestamp(), "err", err) // unable to validate the batch for now. retry later.
return BatchUndecided } + parentNum = currentNum - 1 parentBlock, err = l2Fetcher.L2BlockRefByNumber(ctx, parentNum) if err != nil { log.Warn("failed to fetch L2 block", "number", parentNum, "err", err) diff --git a/op-node/rollup/derive/batches_test.go b/op-node/rollup/derive/batches_test.go index b3eb4309c..dc5e21030 100644 --- a/op-node/rollup/derive/batches_test.go +++ b/op-node/rollup/derive/batches_test.go @@ -24,36 +24,43 @@ type ValidBatchTestCase struct { L2SafeHead eth.L2BlockRef Batch BatchWithL1InclusionBlock Expected BatchValidity - ExpectedLog string // log message that must be included - NotExpectedLog string // log message that must not be included - ConfigMod func(*rollup.Config) // optional rollup config mod + ExpectedLog string // log message that must be included + NotExpectedLog string // log message that must not be included + ConfigMod func(*rollup.Config, *ValidBatchTestCase) // optional rollup config mod } var zero64 = uint64(0) -func deltaAtGenesis(c *rollup.Config) { +func deltaAtGenesis(c *rollup.Config, t *ValidBatchTestCase) { c.DeltaTime = &zero64 + _ = t } -func deltaAt(t *uint64) func(*rollup.Config) { - return func(c *rollup.Config) { - c.DeltaTime = t - } +func setDeltaAndL2Time(c *rollup.Config, t *ValidBatchTestCase) { + c.DeltaTime = &zero64 + _ = t + c.Genesis.L2Time = t.L2SafeHead.Number - t.L2SafeHead.Time } -func fjordAt(t *uint64) func(*rollup.Config) { - return func(c *rollup.Config) { - c.FjordTime = t - } -} - -func multiMod[T any](mods ...func(T)) func(T) { - return func(x T) { - for _, mod := range mods { - mod(x) - } - } -} +//func deltaAt(t *uint64) func(*rollup.Config) { +// return func(c *rollup.Config) { +// c.DeltaTime = t +// } +//} +// +//func fjordAt(t *uint64) func(*rollup.Config) { +// return func(c *rollup.Config) { +// c.FjordTime = t +// } +//} +// +//func multiMod[T any](mods ...func(T)) func(T) { +// return func(x T) { +// for _, mod := range mods { +// mod(x) +// } +// } +//} const defaultBlockTime = 
2 @@ -63,7 +70,7 @@ func TestValidBatch(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 31, // a genesis time that itself does not align to make it more interesting }, - BlockTime: defaultBlockTime * 1000, + BlockTime: defaultBlockTime, SeqWindowSize: 4, MaxSequencerDrift: 6, } @@ -107,6 +114,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l1E.Hash, Time: l1E.Time + 7, } + _ = l1F l2A0 := eth.L2BlockRef{ Hash: testutils.RandomHash(rng), @@ -213,6 +221,7 @@ func TestValidBatch(t *testing.T) { L1Origin: l1Z.ID(), SequenceNumber: 0, } + _ = l2Z0 l2A4 := eth.L2BlockRef{ Hash: testutils.RandomHash(rng), @@ -229,6 +238,7 @@ func TestValidBatch(t *testing.T) { ParentHash: l1A.Hash, Time: l2A4.Time + 1, // too late for l2A4 to adopt yet } + _ = l1BLate singularBatchTestCases := []ValidBatchTestCase{ { @@ -263,1289 +273,1289 @@ func TestValidBatch(t *testing.T) { }, Expected: BatchFuture, }, - { - Name: "old timestamp", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A0.MillisecondTimestamp(), // repeating the same time - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, - { // TODO: - Name: "misaligned timestamp", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 too low - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, - { - Name: "invalid parent block hash", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: testutils.RandomHash(rng), 
- EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, - { - Name: "sequence window expired", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1F, // included in 5th block after epoch of batch, while seq window is 4 - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, - { - Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data - L1Blocks: []eth.L1BlockRef{l1B, l1C, l1D}, - L2SafeHead: l2B0, // we already moved on to B - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: &SingularBatch{ - ParentHash: l2B0.Hash, // build on top of safe head to continue - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid - EpochHash: l2A3.L1Origin.Hash, - Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, - { - Name: "insufficient L1 info for eager derivation", - L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: &SingularBatch{ - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: BatchUndecided, - }, - { - Name: "epoch too new", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1D, - Batch: &SingularBatch{ - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l1C.Number), // invalid, 
we need to adopt epoch B before C - EpochHash: l1C.Hash, - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, - { - Name: "epoch hash wrong", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: &SingularBatch{ - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, - { - Name: "sequencer time drift on same epoch with non-empty txs", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, - }, - }, - Expected: BatchDrop, - }, - { // this is the same test case as above, but with Fjord activated at the L1 origin, so accepted batch - Name: "no sequencer time drift on same epoch with non-empty txs and Fjord", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{[]byte("sequencer should include this tx")}, - }, - }, - ConfigMod: fjordAt(&l1A.Time), - Expected: BatchAccept, - }, - { - Name: "sequencer time drift on changing epoch with non-empty txs", - L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, - L2SafeHead: l2X0, - Batch: 
BatchWithL1InclusionBlock{ - L1InclusionBlock: l1Z, - Batch: &SingularBatch{ - ParentHash: l2Y0.ParentHash, - EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), - EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time - Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, - }, - }, - Expected: BatchDrop, - }, - { - Name: "sequencer time drift on same epoch with empty txs and late next epoch", - L1Blocks: []eth.L1BlockRef{l1A, l1BLate}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1BLate, - Batch: &SingularBatch{ // l2A4 time < l1BLate time, so we cannot adopt origin B yet - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: BatchAccept, // accepted because empty & preserving L2 time invariant - }, - { - Name: "sequencer time drift on changing epoch with empty txs", - L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, - L2SafeHead: l2X0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1Z, - Batch: &SingularBatch{ - ParentHash: l2Y0.ParentHash, - EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), - EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time - Transactions: nil, - }, - }, - Expected: BatchAccept, // accepted because empty & still advancing epoch - }, - { - Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet", - L1Blocks: []eth.L1BlockRef{l1A}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: 
BatchUndecided, // we have to wait till the next epoch is in sight to check the time - }, - { - Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, - Expected: BatchDrop, // dropped because it could have advanced the epoch to B - }, - { - Name: "empty tx included", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{ - []byte{}, // empty tx data - }, - }, - }, - Expected: BatchDrop, - }, - { - Name: "deposit tx included", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{ - []byte{types.DepositTxType, 0}, // piece of data alike to a deposit - }, - }, - }, - Expected: BatchDrop, - }, - { - Name: "valid batch same epoch", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{ - []byte{0x02, 0x42, 
0x13, 0x37}, - []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, - }, - }, - }, - Expected: BatchAccept, - }, - { - Name: "valid batch changing epoch", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: &SingularBatch{ - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{ - []byte{0x02, 0x42, 0x13, 0x37}, - []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, - }, - }, - }, - Expected: BatchAccept, - }, - { - Name: "batch with L2 time before L1 time", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ // we build l2B0', which starts a new epoch too early - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp() + defaultBlockTime*1000, - Transactions: nil, - }, - }, - Expected: BatchDrop, - }, + //{ + // Name: "old timestamp", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A0.MillisecondTimestamp(), // repeating the same time + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ // TODO: + // Name: "misaligned timestamp", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 too low + // Transactions: nil, + // }, + 
// }, + // Expected: BatchDrop, + //}, + //{ + // Name: "invalid parent block hash", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: testutils.RandomHash(rng), + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "sequence window expired", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1F, // included in 5th block after epoch of batch, while seq window is 4 + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data + // L1Blocks: []eth.L1BlockRef{l1B, l1C, l1D}, + // L2SafeHead: l2B0, // we already moved on to B + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: &SingularBatch{ + // ParentHash: l2B0.Hash, // build on top of safe head to continue + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "insufficient L1 info for eager derivation", + // L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: &SingularBatch{ + // ParentHash: l2B0.ParentHash, + // EpochNum: 
rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchUndecided, + //}, + //{ + // Name: "epoch too new", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1D, + // Batch: &SingularBatch{ + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C + // EpochHash: l1C.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "epoch hash wrong", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: &SingularBatch{ + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l1A.Hash, // invalid, epoch hash should be l1B + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "sequencer time drift on same epoch with non-empty txs", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ // this is the same test case as above, but with Fjord activated at the L1 origin, so accepted batch + // Name: "no sequencer time drift on same epoch with non-empty txs and Fjord", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A3, + // Batch: 
BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{[]byte("sequencer should include this tx")}, + // }, + // }, + // ConfigMod: fjordAt(&l1A.Time), + // Expected: BatchAccept, + //}, + //{ + // Name: "sequencer time drift on changing epoch with non-empty txs", + // L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, + // L2SafeHead: l2X0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1Z, + // Batch: &SingularBatch{ + // ParentHash: l2Y0.ParentHash, + // EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), + // EpochHash: l2Y0.L1Origin.Hash, + // Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time + // Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and late next epoch", + // L1Blocks: []eth.L1BlockRef{l1A, l1BLate}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1BLate, + // Batch: &SingularBatch{ // l2A4 time < l1BLate time, so we cannot adopt origin B yet + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchAccept, // accepted because empty & preserving L2 time invariant + //}, + //{ + // Name: "sequencer time drift on changing epoch with empty txs", + // L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, + // L2SafeHead: l2X0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1Z, + // Batch: &SingularBatch{ + // ParentHash: l2Y0.ParentHash, + // EpochNum: 
rollup.Epoch(l2Y0.L1Origin.Number), + // EpochHash: l2Y0.L1Origin.Hash, + // Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time + // Transactions: nil, + // }, + // }, + // Expected: BatchAccept, // accepted because empty & still advancing epoch + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet", + // L1Blocks: []eth.L1BlockRef{l1A}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: &SingularBatch{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, // dropped because it could have advanced the epoch to B + //}, + //{ + // Name: "empty tx included", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{ + // []byte{}, // empty tx data 
+ // }, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "deposit tx included", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{ + // []byte{types.DepositTxType, 0}, // piece of data alike to a deposit + // }, + // }, + // }, + // Expected: BatchDrop, + //}, + //{ + // Name: "valid batch same epoch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{ + // []byte{0x02, 0x42, 0x13, 0x37}, + // []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, + // }, + // }, + // }, + // Expected: BatchAccept, + //}, + //{ + // Name: "valid batch changing epoch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: &SingularBatch{ + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{ + // []byte{0x02, 0x42, 0x13, 0x37}, + // []byte{0x02, 0xde, 0xad, 0xbe, 0xef}, + // }, + // }, + // }, + // Expected: BatchAccept, + //}, + //{ + // Name: "batch with L2 time before L1 time", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ // we build l2B0', which starts a new epoch too early + // ParentHash: l2A2.Hash, + // 
EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp() + defaultBlockTime*1000, + // Transactions: nil, + // }, + // }, + // Expected: BatchDrop, + //}, } spanBatchTestCases := []ValidBatchTestCase{ - { - Name: "missing L1 info", - L1Blocks: []eth.L1BlockRef{}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchUndecided, - ExpectedLog: "missing L1 block input, cannot proceed with batch checking", - ConfigMod: deltaAtGenesis, - }, - { - Name: "future timestamp", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: (l2A1.Time + 1) * 1000, // 1 too high - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchFuture, - ExpectedLog: "received out-of-order batch for future processing after next batch", - ConfigMod: deltaAtGenesis, - }, - { - Name: "misaligned timestamp", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 too low - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "span batch has no new blocks after safe head", - ConfigMod: deltaAtGenesis, - }, - { - 
Name: "invalid parent block hash", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: testutils.RandomHash(rng), - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "ignoring batch with mismatching parent hash", - ConfigMod: deltaAtGenesis, - }, - { - Name: "sequence window expired", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1F, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch was included too late, sequence window expired", - ConfigMod: deltaAtGenesis, - }, - { - Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data - L1Blocks: []eth.L1BlockRef{l1B, l1C, l1D}, - L2SafeHead: l2B0, // we already moved on to B - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2B0.Hash, // build on top of safe head to continue - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid - EpochHash: l2A3.L1Origin.Hash, - Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check - Transactions: nil, - }, - { - EpochNum: rollup.Epoch(l1B.Number), - EpochHash: l1B.Hash, // pass the l1 origin check - Timestamp: (l2B0.Time + defaultBlockTime*2) * 1000, - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: 
"dropped batch, epoch is too old", - ConfigMod: deltaAtGenesis, - }, - { - Name: "insufficient L1 info for eager derivation", - L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchUndecided, - ExpectedLog: "eager batch wants to advance epoch, but could not without more L1 blocks", - ConfigMod: deltaAtGenesis, - }, - { - Name: "insufficient L1 info for eager derivation - long span", - L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A3.ParentHash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchUndecided, - ExpectedLog: "need more l1 blocks to check entire origins of span batch", - ConfigMod: deltaAtGenesis, - }, - { - Name: "epoch too new", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1D, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C - EpochHash: l1C.Hash, - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch is 
for future epoch too far ahead, while it has the next timestamp, so it must be invalid", - ConfigMod: deltaAtGenesis, - }, - { - Name: "epoch hash wrong", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch is for different L1 chain, epoch hash does not match", - ConfigMod: deltaAtGenesis, - }, - { - Name: "epoch hash wrong - long span", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { // valid batch - ParentHash: l2A3.ParentHash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l1A.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l1A.Hash, // invalid, epoch hash should be l1B - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch is for different L1 chain, epoch hash does not match", - ConfigMod: deltaAtGenesis, - }, - { - Name: "sequencer time drift on same epoch with non-empty txs", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, 
- }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", - ConfigMod: deltaAtGenesis, - }, - { - Name: "no sequencer time drift on same epoch with non-empty txs and Fjord", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchAccept, - ConfigMod: multiMod(deltaAtGenesis, fjordAt(&l1A.Time)), - }, - { - Name: "sequencer time drift on same epoch with non-empty txs - long span", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { // valid batch - ParentHash: l2A3.ParentHash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", - ConfigMod: deltaAtGenesis, - }, - { - Name: "sequencer time drift on changing epoch with non-empty txs", - L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, - L2SafeHead: l2X0, - Batch: 
BatchWithL1InclusionBlock{ - L1InclusionBlock: l1Z, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2Y0.ParentHash, - EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), - EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time - Transactions: []hexutil.Bytes{randTxData}, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", - ConfigMod: deltaAtGenesis, - }, - { - Name: "sequencer time drift on same epoch with empty txs and late next epoch", - L1Blocks: []eth.L1BlockRef{l1A, l1BLate}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1BLate, - Batch: initializedSpanBatch([]*SingularBatch{ - { // l2A4 time < l1BLate time, so we cannot adopt origin B yet - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchAccept, // accepted because empty & preserving L2 time invariant - ConfigMod: deltaAtGenesis, - }, - { - Name: "sequencer time drift on changing epoch with empty txs", - L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, - L2SafeHead: l2X0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1Z, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2Y0.ParentHash, - EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), - EpochHash: l2Y0.L1Origin.Hash, - Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time - Transactions: nil, - }, - { - ParentHash: l2Z0.ParentHash, - EpochNum: rollup.Epoch(l2Z0.L1Origin.Number), - EpochHash: l2Z0.L1Origin.Hash, - Timestamp: l2Z0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchAccept, // accepted 
because empty & still advancing epoch - ConfigMod: deltaAtGenesis, - NotExpectedLog: "continuing with empty batch before late L1 block to preserve L2 time invariant", - }, - { - Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet", - L1Blocks: []eth.L1BlockRef{l1A}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time - ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid", - ConfigMod: deltaAtGenesis, - }, - { - Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet - long span", - L1Blocks: []eth.L1BlockRef{l1A}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { // valid batch - ParentHash: l2A3.ParentHash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time - ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid", - ConfigMod: 
deltaAtGenesis, - }, - { - Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, // dropped because it could have advanced the epoch to B - ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid", - ConfigMod: deltaAtGenesis, - }, - { - Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it - long span", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { // valid batch - ParentHash: l2A3.ParentHash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 - ParentHash: l2A4.ParentHash, - EpochNum: rollup.Epoch(l2A4.L1Origin.Number), - EpochHash: l2A4.L1Origin.Hash, - Timestamp: l2A4.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, // dropped because it could have advanced the epoch to B - ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid", - ConfigMod: deltaAtGenesis, - }, - { - Name: "empty tx included", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - 
L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{ - []byte{}, // empty tx data - }, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "transaction data must not be empty, but found empty tx", - ConfigMod: deltaAtGenesis, - }, - { - Name: "deposit tx included", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{ - []byte{types.DepositTxType, 0}, // piece of data alike to a deposit - }, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "sequencers may not embed any deposits into batch data, but found tx that has one", - ConfigMod: deltaAtGenesis, - }, - { - Name: "valid batch same epoch", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchAccept, - ConfigMod: deltaAtGenesis, - }, - { - Name: "valid batch changing epoch", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1C, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2B0.ParentHash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: 
l2B0.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchAccept, - ConfigMod: deltaAtGenesis, - }, - { - Name: "batch with L2 time before L1 time", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { // we build l2B0, which starts a new epoch too early - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: (l2A2.Time + defaultBlockTime) * 1000, - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "block timestamp is less than L1 origin timestamp", - ConfigMod: deltaAtGenesis, - }, - { - Name: "batch with L2 time before L1 time - long span", - L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, - L2SafeHead: l2A1, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { // valid batch - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - Transactions: nil, - }, - { // we build l2B0, which starts a new epoch too early - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: (l2A2.Time + defaultBlockTime) * 1000, - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "block timestamp is less than L1 origin timestamp", - ConfigMod: deltaAtGenesis, - }, - { - Name: "valid overlapping batch", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - 
Transactions: nil, - }, - { - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchAccept, - ConfigMod: deltaAtGenesis, - }, - { - Name: "longer overlapping batch", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A0.Hash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchAccept, - ConfigMod: deltaAtGenesis, - }, - { - Name: "fully overlapping batch", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A0.Hash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "span batch has no new blocks after safe head", - ConfigMod: deltaAtGenesis, - }, - { - Name: "overlapping batch with invalid parent hash", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: 
BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A0.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "ignoring batch with mismatching parent hash", - ConfigMod: deltaAtGenesis, - }, - { - Name: "overlapping batch with invalid origin number", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number) + 1, - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "overlapped block's L1 origin number does not match", - ConfigMod: deltaAtGenesis, - }, - { - Name: "overlapping batch with invalid tx", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A2, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - { - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, 
- Expected: BatchDrop, - ExpectedLog: "overlapped block's tx count does not match", - ConfigMod: deltaAtGenesis, - }, - { - Name: "overlapping batch l2 fetcher error", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A1, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A0.ParentHash, - EpochNum: rollup.Epoch(l2A0.L1Origin.Number), - EpochHash: l2A0.L1Origin.Hash, - Timestamp: l2A0.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A0.Hash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A2.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchUndecided, - ExpectedLog: "failed to fetch L2 block", - ConfigMod: deltaAtGenesis, - }, - { - Name: "short block time", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A0.Hash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: (l2A0.Time + 1) * 1000, - Transactions: nil, - }, - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: (l2A1.Time + 1) * 1000, - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch has misaligned timestamp, block time is too short", - ConfigMod: deltaAtGenesis, - }, - { - Name: "misaligned batch", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A0.Hash, - EpochNum: 
rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: (l2A0.Time - 1) * 1000, - Transactions: nil, - }, - { - ParentHash: l2A1.Hash, - EpochNum: rollup.Epoch(l2A2.L1Origin.Number), - EpochHash: l2A2.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchDrop, - ExpectedLog: "batch has misaligned timestamp, not overlapped exactly", - ConfigMod: deltaAtGenesis, - }, - { - Name: "failed to fetch overlapping block payload", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A3, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A2.Hash, - EpochNum: rollup.Epoch(l2A3.L1Origin.Number), - EpochHash: l2A3.L1Origin.Hash, - Timestamp: l2A3.MillisecondTimestamp(), - Transactions: nil, - }, - { - ParentHash: l2A3.Hash, - EpochNum: rollup.Epoch(l2B0.L1Origin.Number), - EpochHash: l2B0.L1Origin.Hash, - Timestamp: l2B0.MillisecondTimestamp(), - Transactions: nil, - }, - }, uint64(0), big.NewInt(0)), - }, - Expected: BatchUndecided, - ExpectedLog: "failed to fetch L2 block payload", - ConfigMod: deltaAtGenesis, - }, - { - Name: "singular batch before hard fork", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, - ConfigMod: deltaAt(&l1B.Time), - Expected: BatchAccept, - }, - { - Name: "span batch before hard fork", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: 
l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, uint64(0), big.NewInt(0)), - }, - ConfigMod: deltaAt(&l1B.Time), - Expected: BatchDrop, - ExpectedLog: "received SpanBatch with L1 origin before Delta hard fork", - }, - { - Name: "singular batch after hard fork", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: &SingularBatch{ - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, - ConfigMod: deltaAt(&l1A.Time), - Expected: BatchAccept, - }, - { - Name: "span batch after hard fork", - L1Blocks: []eth.L1BlockRef{l1A, l1B}, - L2SafeHead: l2A0, - Batch: BatchWithL1InclusionBlock{ - L1InclusionBlock: l1B, - Batch: initializedSpanBatch([]*SingularBatch{ - { - ParentHash: l2A1.ParentHash, - EpochNum: rollup.Epoch(l2A1.L1Origin.Number), - EpochHash: l2A1.L1Origin.Hash, - Timestamp: l2A1.MillisecondTimestamp(), - Transactions: []hexutil.Bytes{randTxData}, - }, - }, uint64(0), big.NewInt(0)), - }, - ConfigMod: deltaAt(&l1A.Time), - Expected: BatchAccept, - }, + //{ + // Name: "missing L1 info", + // L1Blocks: []eth.L1BlockRef{}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchUndecided, + // ExpectedLog: "missing L1 block input, cannot proceed with batch checking", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "future timestamp", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A0, + // Batch: 
BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: (l2A1.Time + 1) * 1000, // 1 too high + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchFuture, + // ExpectedLog: "received out-of-order batch for future processing after next batch", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "misaligned timestamp", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: (l2A1.Time - 1) * 1000, // block time is 2, so this is 1 too low + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "span batch has no new blocks after safe head", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "invalid parent block hash", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: testutils.RandomHash(rng), + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "ignoring batch with mismatching parent hash", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequence window expired", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1F, + // Batch: 
initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch was included too late, sequence window expired", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "epoch too old, but good parent hash and timestamp", // repeat of now outdated l2A3 data + // L1Blocks: []eth.L1BlockRef{l1B, l1C, l1D}, + // L2SafeHead: l2B0, // we already moved on to B + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2B0.Hash, // build on top of safe head to continue + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: (l2B0.Time + defaultBlockTime) * 1000, // pass the timestamp check to get too epoch check + // Transactions: nil, + // }, + // { + // EpochNum: rollup.Epoch(l1B.Number), + // EpochHash: l1B.Hash, // pass the l1 origin check + // Timestamp: (l2B0.Time + defaultBlockTime*2) * 1000, + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "dropped batch, epoch is too old", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "insufficient L1 info for eager derivation", + // L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchUndecided, + // ExpectedLog: 
"eager batch wants to advance epoch, but could not without more L1 blocks", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "insufficient L1 info for eager derivation - long span", + // L1Blocks: []eth.L1BlockRef{l1A}, // don't know about l1B yet + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A3.ParentHash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchUndecided, + // ExpectedLog: "need more l1 blocks to check entire origins of span batch", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "epoch too new", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1D, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C + // EpochHash: l1C.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "epoch hash wrong", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // 
EpochHash: l1A.Hash, // invalid, epoch hash should be l1B + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch is for different L1 chain, epoch hash does not match", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "epoch hash wrong - long span", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // valid batch + // ParentHash: l2A3.ParentHash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l1A.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l1A.Hash, // invalid, epoch hash should be l1B + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch is for different L1 chain, epoch hash does not match", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequencer time drift on same epoch with non-empty txs", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "no 
sequencer time drift on same epoch with non-empty txs and Fjord", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchAccept, + // ConfigMod: multiMod(deltaAtGenesis, fjordAt(&l1A.Time)), + //}, + //{ + // Name: "sequencer time drift on same epoch with non-empty txs - long span", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // valid batch + // ParentHash: l2A3.ParentHash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequencer time drift on changing epoch with non-empty txs", + // L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, + // L2SafeHead: l2X0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1Z, + // Batch: initializedSpanBatch([]*SingularBatch{ + 
// { + // ParentHash: l2Y0.ParentHash, + // EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), + // EpochHash: l2Y0.L1Origin.Hash, + // Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and late next epoch", + // L1Blocks: []eth.L1BlockRef{l1A, l1BLate}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1BLate, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // l2A4 time < l1BLate time, so we cannot adopt origin B yet + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchAccept, // accepted because empty & preserving L2 time invariant + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequencer time drift on changing epoch with empty txs", + // L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, + // L2SafeHead: l2X0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1Z, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2Y0.ParentHash, + // EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), + // EpochHash: l2Y0.L1Origin.Hash, + // Timestamp: l2Y0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time + // Transactions: nil, + // }, + // { + // ParentHash: l2Z0.ParentHash, + // EpochNum: rollup.Epoch(l2Z0.L1Origin.Number), + // EpochHash: l2Z0.L1Origin.Hash, + // Timestamp: l2Z0.MillisecondTimestamp(), // valid, but more than 6 ahead of l1Y.Time + // Transactions: nil, + // }, + // }, uint64(0), 
big.NewInt(0)), + // }, + // Expected: BatchAccept, // accepted because empty & still advancing epoch + // ConfigMod: deltaAtGenesis, + // NotExpectedLog: "continuing with empty batch before late L1 block to preserve L2 time invariant", + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet", + // L1Blocks: []eth.L1BlockRef{l1A}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time + // ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet - long span", + // L1Blocks: []eth.L1BlockRef{l1A}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // valid batch + // ParentHash: l2A3.ParentHash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchUndecided, // we have to wait till 
the next epoch is in sight to check the time + // ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, // dropped because it could have advanced the epoch to B + // ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it - long span", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // valid batch + // ParentHash: l2A3.ParentHash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0 + // ParentHash: l2A4.ParentHash, + // EpochNum: rollup.Epoch(l2A4.L1Origin.Number), + // EpochHash: l2A4.L1Origin.Hash, + // Timestamp: l2A4.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, // dropped because it could have 
advanced the epoch to B + // ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "empty tx included", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{ + // []byte{}, // empty tx data + // }, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "transaction data must not be empty, but found empty tx", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "deposit tx included", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{ + // []byte{types.DepositTxType, 0}, // piece of data alike to a deposit + // }, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "sequencers may not embed any deposits into batch data, but found tx that has one", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "valid batch same epoch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // 
Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchAccept, + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "valid batch changing epoch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1C, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2B0.ParentHash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchAccept, + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "batch with L2 time before L1 time", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // we build l2B0, which starts a new epoch too early + // ParentHash: l2A2.Hash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: (l2A2.Time + defaultBlockTime) * 1000, + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "block timestamp is less than L1 origin timestamp", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "batch with L2 time before L1 time - long span", + // L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, + // L2SafeHead: l2A1, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { // valid batch + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { // we build l2B0, which starts a new epoch too early + // ParentHash: l2A2.Hash, + // EpochNum: 
rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: (l2A2.Time + defaultBlockTime) * 1000, + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "block timestamp is less than L1 origin timestamp", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "valid overlapping batch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A2.Hash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchAccept, + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "longer overlapping batch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A0.Hash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A2.Hash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchAccept, + // ConfigMod: 
deltaAtGenesis, + //}, + //{ + // Name: "fully overlapping batch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A0.Hash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "span batch has no new blocks after safe head", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "overlapping batch with invalid parent hash", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A0.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A2.Hash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "ignoring batch with mismatching parent hash", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "overlapping batch with invalid origin number", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number) + 1, + // EpochHash: 
l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A2.Hash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "overlapped block's L1 origin number does not match", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "overlapping batch with invalid tx", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A2, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // { + // ParentHash: l2A2.Hash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "overlapped block's tx count does not match", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "overlapping batch l2 fetcher error", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A1, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A0.ParentHash, + // EpochNum: rollup.Epoch(l2A0.L1Origin.Number), + // EpochHash: l2A0.L1Origin.Hash, + // Timestamp: l2A0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A0.Hash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A1.Hash, + // 
EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A2.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchUndecided, + // ExpectedLog: "failed to fetch L2 block", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "short block time", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A0.Hash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: (l2A0.Time + 1) * 1000, + // Transactions: nil, + // }, + // { + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: (l2A1.Time + 1) * 1000, + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch has misaligned timestamp, block time is too short", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "misaligned batch", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A0.Hash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: (l2A0.Time - 1) * 1000, + // Transactions: nil, + // }, + // { + // ParentHash: l2A1.Hash, + // EpochNum: rollup.Epoch(l2A2.L1Origin.Number), + // EpochHash: l2A2.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchDrop, + // ExpectedLog: "batch has misaligned timestamp, not overlapped exactly", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "failed to fetch overlapping block payload", + // L1Blocks: 
[]eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A3, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A2.Hash, + // EpochNum: rollup.Epoch(l2A3.L1Origin.Number), + // EpochHash: l2A3.L1Origin.Hash, + // Timestamp: l2A3.MillisecondTimestamp(), + // Transactions: nil, + // }, + // { + // ParentHash: l2A3.Hash, + // EpochNum: rollup.Epoch(l2B0.L1Origin.Number), + // EpochHash: l2B0.L1Origin.Hash, + // Timestamp: l2B0.MillisecondTimestamp(), + // Transactions: nil, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // Expected: BatchUndecided, + // ExpectedLog: "failed to fetch L2 block payload", + // ConfigMod: deltaAtGenesis, + //}, + //{ + // Name: "singular batch before hard fork", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, + // ConfigMod: deltaAt(&l1B.Time), + // Expected: BatchAccept, + //}, + //{ + // Name: "span batch before hard fork", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // ConfigMod: deltaAt(&l1B.Time), + // Expected: BatchDrop, + // ExpectedLog: "received SpanBatch with L1 origin before Delta hard fork", + //}, + //{ + // Name: "singular batch after hard fork", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // 
L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: &SingularBatch{ + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, + // ConfigMod: deltaAt(&l1A.Time), + // Expected: BatchAccept, + //}, + //{ + // Name: "span batch after hard fork", + // L1Blocks: []eth.L1BlockRef{l1A, l1B}, + // L2SafeHead: l2A0, + // Batch: BatchWithL1InclusionBlock{ + // L1InclusionBlock: l1B, + // Batch: initializedSpanBatch([]*SingularBatch{ + // { + // ParentHash: l2A1.ParentHash, + // EpochNum: rollup.Epoch(l2A1.L1Origin.Number), + // EpochHash: l2A1.L1Origin.Hash, + // Timestamp: l2A1.MillisecondTimestamp(), + // Transactions: []hexutil.Bytes{randTxData}, + // }, + // }, uint64(0), big.NewInt(0)), + // }, + // ConfigMod: deltaAt(&l1A.Time), + // Expected: BatchAccept, + //}, } // Log level can be increased for debugging purposes @@ -1580,7 +1590,7 @@ func TestValidBatch(t *testing.T) { ctx := context.Background() rcfg := defaultConf() if mod := testCase.ConfigMod; mod != nil { - mod(rcfg) + mod(rcfg, &testCase) } // TODO validity := CheckBatch(ctx, rcfg, logger, testCase.L1Blocks, testCase.L2SafeHead, &testCase.Batch, &l2Client) @@ -1645,14 +1655,14 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochHash: l2B1.L1Origin.Hash, - Timestamp: l2B1.MillisecondTimestamp(), + Timestamp: l2B1.Time * 1000, Transactions: []hexutil.Bytes{randTxData}, // Random generated TX that does not match overlapping block }, { ParentHash: l2B1.Hash, EpochNum: rollup.Epoch(l2B2.L1Origin.Number), EpochHash: l2B2.L1Origin.Hash, - Timestamp: l2B2.MillisecondTimestamp(), + Timestamp: l2B2.Time * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), @@ -1679,6 +1689,7 @@ func TestValidBatch(t *testing.T) { } 
l2Client.Mock.On("PayloadByNumber", l2B1.Number).Return(&payload, &nilErr).Once() + // TODO: TargetBlockNumber invalidTxTestCase := ValidBatchTestCase{ Name: "invalid_tx_overlapping_batch", L1Blocks: []eth.L1BlockRef{l1B}, @@ -1690,21 +1701,21 @@ func TestValidBatch(t *testing.T) { ParentHash: l2B0.Hash, EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochHash: l2B1.L1Origin.Hash, - Timestamp: l2B1.MillisecondTimestamp(), + Timestamp: l2B1.Time * 1000, Transactions: []hexutil.Bytes{randTxData}, }, { ParentHash: l2B1.Hash, EpochNum: rollup.Epoch(l2B2.L1Origin.Number), EpochHash: l2B2.L1Origin.Hash, - Timestamp: l2B2.MillisecondTimestamp(), + Timestamp: l2B2.Time * 1000, Transactions: nil, }, }, uint64(0), big.NewInt(0)), }, Expected: BatchDrop, ExpectedLog: "failed to extract L2BlockRef from execution payload", - ConfigMod: deltaAtGenesis, + ConfigMod: setDeltaAndL2Time, } t.Run(invalidTxTestCase.Name, func(t *testing.T) { diff --git a/op-node/rollup/derive/channel_out_test.go b/op-node/rollup/derive/channel_out_test.go index 9c5d038c9..1395e328f 100644 --- a/op-node/rollup/derive/channel_out_test.go +++ b/op-node/rollup/derive/channel_out_test.go @@ -103,7 +103,8 @@ func TestOutputFrameNoEmptyLastFrame(t *testing.T) { txCount := 1 singularBatch := RandomSingularBatch(rng, txCount, chainID) - err := cout.AddSingularBatch(singularBatch, 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, singularBatch, 0) var written uint64 require.NoError(t, err) @@ -259,7 +260,8 @@ func TestSpanChannelOut(t *testing.T) { func SpanChannelOutCompressionOnlyOneBatch(t *testing.T, algo CompressionAlgo) { cout, singularBatches := SpanChannelAndBatches(t, 300, 2, algo) - err := cout.AddSingularBatch(singularBatches[0], 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, singularBatches[0], 0) // confirm compression was not skipped require.Greater(t, cout.compressor.Len(), 0) require.NoError(t, err) @@ -268,7 +270,7 @@ func SpanChannelOutCompressionOnlyOneBatch(t 
*testing.T, algo CompressionAlgo) { require.ErrorIs(t, cout.FullErr(), ErrCompressorFull) // confirm adding another batch would cause the same full error - err = cout.AddSingularBatch(singularBatches[1], 0) + err = cout.AddSingularBatch(&cfg, singularBatches[1], 0) require.ErrorIs(t, err, ErrCompressorFull) } @@ -277,7 +279,8 @@ func SpanChannelOutCompressionUndo(t *testing.T, algo CompressionAlgo) { // target is larger than one batch, but smaller than two batches cout, singularBatches := SpanChannelAndBatches(t, 750, 2, algo) - err := cout.AddSingularBatch(singularBatches[0], 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, singularBatches[0], 0) require.NoError(t, err) // confirm that the first compression was skipped if algo == Zlib { @@ -288,7 +291,7 @@ func SpanChannelOutCompressionUndo(t *testing.T, algo CompressionAlgo) { // record the RLP length to confirm it doesn't change when adding a rejected batch rlp1 := cout.activeRLP().Len() - err = cout.AddSingularBatch(singularBatches[1], 0) + err = cout.AddSingularBatch(&cfg, singularBatches[1], 0) require.ErrorIs(t, err, ErrCompressorFull) // confirm that the second compression was not skipped require.Greater(t, cout.compressor.Len(), 0) @@ -303,7 +306,8 @@ func SpanChannelOutClose(t *testing.T, algo CompressionAlgo) { target := uint64(600) cout, singularBatches := SpanChannelAndBatches(t, target, 1, algo) - err := cout.AddSingularBatch(singularBatches[0], 0) + var cfg rollup.Config + err := cout.AddSingularBatch(&cfg, singularBatches[0], 0) require.NoError(t, err) // confirm no compression has happened yet diff --git a/op-node/rollup/derive/l1_block_info_test.go b/op-node/rollup/derive/l1_block_info_test.go index b87c9108d..1f94e585c 100644 --- a/op-node/rollup/derive/l1_block_info_test.go +++ b/op-node/rollup/derive/l1_block_info_test.go @@ -139,7 +139,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg := rollup.Config{ RegolithTime: &zero, EcotoneTime: &zero, - BlockTime: 2000, + 
BlockTime: 2, } depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 2000) require.NoError(t, err) @@ -154,7 +154,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg := rollup.Config{ RegolithTime: &zero, EcotoneTime: &zero, - BlockTime: 2000, + BlockTime: 2, } depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0) require.NoError(t, err) diff --git a/op-node/rollup/derive/span_batch.go b/op-node/rollup/derive/span_batch.go index 6bbc0ff33..62e5f776d 100644 --- a/op-node/rollup/derive/span_batch.go +++ b/op-node/rollup/derive/span_batch.go @@ -366,13 +366,13 @@ func (b *RawSpanBatch) derive(rollupCfg *rollup.Config, genesisTimestamp uint64, if rollupCfg.VoltaTime != nil && *rollupCfg.VoltaTime > genesisTimestamp { secondSinceVolta := *rollupCfg.VoltaTime - genesisTimestamp if b.relTimestamp >= secondSinceVolta { - blockInterval = rollup.VoltBlockTime + blockInterval = rollup.MillisecondBlockIntervalVolta millisecondTimestamp = true } else { - blockInterval = rollup.BeforeVoltBlockTime + blockInterval = rollupCfg.BlockTime * 1000 } } else { - blockInterval = rollup.BeforeVoltBlockTime + blockInterval = rollupCfg.BlockTime * 1000 } spanBatch := SpanBatch{ @@ -444,8 +444,6 @@ type SpanBatch struct { originBits *big.Int blockTxCounts []uint64 sbtxs *spanBatchTxs - - cfg *rollup.Config } func (b *SpanBatch) AsSingularBatch() (*SingularBatch, bool) { return nil, false } diff --git a/op-node/rollup/derive/span_batch_test.go b/op-node/rollup/derive/span_batch_test.go index 8c39696bf..d6efe29ac 100644 --- a/op-node/rollup/derive/span_batch_test.go +++ b/op-node/rollup/derive/span_batch_test.go @@ -348,10 +348,11 @@ func TestSpanBatchDerive(t *testing.T) { spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID) // set originChangedBit to match the original test implementation spanBatch.setFirstOriginChangedBit(uint(originChangedBit)) - rawSpanBatch, err := 
spanBatch.ToRawSpanBatch() + var cfg rollup.Config + rawSpanBatch, err := spanBatch.ToRawSpanBatch(&cfg) require.NoError(t, err) - spanBatchDerived, err := rawSpanBatch.derive(l2BlockTime, genesisTimeStamp, chainID) + spanBatchDerived, err := rawSpanBatch.derive(&cfg, genesisTimeStamp, chainID) require.NoError(t, err) blockCount := len(singularBatches) @@ -404,7 +405,8 @@ func TestSpanBatchMerge(t *testing.T) { spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID) // set originChangedBit to match the original test implementation spanBatch.setFirstOriginChangedBit(uint(originChangedBit)) - rawSpanBatch, err := spanBatch.ToRawSpanBatch() + var cfg rollup.Config + rawSpanBatch, err := spanBatch.ToRawSpanBatch(&cfg) require.NoError(t, err) // check span batch prefix @@ -450,7 +452,8 @@ func TestSpanBatchToSingularBatch(t *testing.T) { spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID) // set originChangedBit to match the original test implementation spanBatch.setFirstOriginChangedBit(uint(originChangedBit)) - rawSpanBatch, err := spanBatch.ToRawSpanBatch() + var cfg rollup.Config + rawSpanBatch, err := spanBatch.ToRawSpanBatch(&cfg) require.NoError(t, err) l1Origins := mockL1Origin(rng, rawSpanBatch, singularBatches) diff --git a/op-node/rollup/driver/metered_engine.go b/op-node/rollup/driver/metered_engine.go index 01b5fc4f4..5bcaa9d2a 100644 --- a/op-node/rollup/driver/metered_engine.go +++ b/op-node/rollup/driver/metered_engine.go @@ -74,7 +74,8 @@ func (m *MeteredEngine) ConfirmPayload(ctx context.Context, agossip async.AsyncG sealTime := now.Sub(sealingStart) buildTime := now.Sub(m.buildingStartTime) m.metrics.RecordSequencerSealingTime(sealTime) - m.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(rollup.VoltBlockTime)*time.Millisecond) + blockInterval := m.cfg.MillisecondBlockInterval(m.inner.UnsafeL2Head().MillisecondTimestamp()) + m.metrics.RecordSequencerBuildingDiffTime(buildTime - 
time.Duration(blockInterval)*time.Millisecond) txnCount := len(payload.ExecutionPayload.Transactions) m.metrics.CountSequencedTxs(txnCount) diff --git a/op-node/rollup/driver/origin_selector_test.go b/op-node/rollup/driver/origin_selector_test.go index b02f4f44e..fa3a9c12f 100644 --- a/op-node/rollup/driver/origin_selector_test.go +++ b/op-node/rollup/driver/origin_selector_test.go @@ -25,7 +25,7 @@ func TestOriginSelectorAdvances(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2000, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -67,7 +67,7 @@ func TestOriginSelectorRespectsOriginTiming(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2000, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -108,7 +108,7 @@ func TestOriginSelectorRespectsConfDepth(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 500, - BlockTime: 2000, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -151,7 +151,7 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2000, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -191,7 +191,7 @@ func TestOriginSelector_FjordSeqDrift(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2000, + BlockTime: 2, FjordTime: u64ptr(20), // a's timestamp } l1 := &testutils.MockL1Source{} @@ -225,7 +225,7 @@ func TestOriginSelectorSeqDriftRespectsNextOriginTime(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2000, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -268,7 +268,7 @@ func 
TestOriginSelectorHandlesLateL1Blocks(t *testing.T) { log := testlog.Logger(t, log.LevelCrit) cfg := &rollup.Config{ MaxSequencerDrift: 8, - BlockTime: 2000, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) diff --git a/op-node/rollup/driver/sequencer_test.go b/op-node/rollup/driver/sequencer_test.go index 0b04ffe1c..786a52c87 100644 --- a/op-node/rollup/driver/sequencer_test.go +++ b/op-node/rollup/driver/sequencer_test.go @@ -184,7 +184,7 @@ func TestSequencerChaosMonkey(t *testing.T) { L2Time: l1Time + 300, // L2 may start with a relative old L1 origin and will have to catch it up SystemConfig: eth.SystemConfig{}, }, - BlockTime: 2000, + BlockTime: 2, MaxSequencerDrift: 30, } // keep track of the L1 timestamps we mock because sometimes we only have the L1 hash/num handy @@ -259,7 +259,7 @@ func TestSequencerChaosMonkey(t *testing.T) { testGasLimit := eth.Uint64Quantity(10_000_000) return ð.PayloadAttributes{ - Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.SecondBlockInterval()), + Timestamp: eth.Uint64Quantity(l2Parent.Time + cfg.BlockTime), PrevRandao: eth.Bytes32{}, SuggestedFeeRecipient: common.Address{}, Transactions: []eth.Data{infoDep}, @@ -375,7 +375,7 @@ func TestSequencerChaosMonkey(t *testing.T) { l2Head := engControl.UnsafeL2Head() t.Logf("avg build time: %s, clock timestamp: %d, L2 head time: %d, L1 origin time: %d, avg txs per block: %f", engControl.avgBuildingTime(), clockFn().Unix(), l2Head.Time, l1Times[l2Head.L1Origin], engControl.avgTxsPerBlock()) require.Equal(t, engControl.totalBuiltBlocks, desiredBlocks, "persist through random errors and build the desired blocks") - require.Equal(t, l2Head.MillisecondTimestamp(), cfg.Genesis.L2Time*1000+uint64(desiredBlocks)*cfg.BlockTime, "reached desired L2 block timestamp") + require.Equal(t, l2Head.MillisecondTimestamp(), (cfg.Genesis.L2Time+uint64(desiredBlocks)*cfg.BlockTime)*1000, "reached desired L2 block timestamp") require.GreaterOrEqual(t, l2Head.Time, 
l1Times[l2Head.L1Origin], "the L2 time >= the L1 time") require.Less(t, l2Head.Time-l1Times[l2Head.L1Origin], uint64(100), "The L1 origin time is close to the L2 time") require.Less(t, clockTime.Sub(time.Unix(int64(l2Head.Time), 0)).Abs(), 2*time.Second, "L2 time is accurate, within 2 seconds of wallclock") diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index f1c5bd5f1..10501f6d9 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -269,7 +269,7 @@ func (s *Driver) eventLoop() { // Create a ticker to check if there is a gap in the engine queue. Whenever // there is, we send requests to sync source to retrieve the missing payloads. - syncCheckInterval := time.Duration(rollup.VoltBlockTime) * time.Millisecond * 2 + syncCheckInterval := time.Duration(rollup.MillisecondBlockIntervalVolta) * time.Millisecond * 2 altSyncTicker := time.NewTicker(syncCheckInterval) defer altSyncTicker.Stop() lastUnsafeL2 := s.engineController.UnsafeL2Head() diff --git a/op-node/rollup/superchain.go b/op-node/rollup/superchain.go index 3714dce1f..acb5daa53 100644 --- a/op-node/rollup/superchain.go +++ b/op-node/rollup/superchain.go @@ -75,7 +75,7 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) { // but since none of the superchain chains differ, it's not represented in the superchain-registry yet. // This restriction on superchain-chains may change in the future. // Test/Alt configurations can still load custom rollup-configs when necessary. 
- //BlockTime: 2, + BlockTime: 2, MaxSequencerDrift: 600, SeqWindowSize: 3600, ChannelTimeout: 300, diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 7537c175e..61a91fa69 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -35,10 +35,10 @@ var ( ErrL2ChainIDNotPositive = errors.New("L2 chain ID must be non-zero and positive") ) -var ( - VoltBlockTime uint64 = 500 - BeforeVoltBlockTime uint64 = 1000 -) +//var ( +// VoltBlockTime uint64 = 500 +// BeforeVoltBlockTime uint64 = 1000 +//) // NetworkNames are user friendly names to use in the chain spec banner. var NetworkNames = map[string]string{ @@ -78,9 +78,8 @@ type Config struct { // Genesis anchor point of the rollup Genesis Genesis `json:"genesis"` // BlockTime is the interval configuration of L2 block; - // which supports the new millisecond unit and is compatible with the legacy second unit. - // Temp delete, will reset after developing - //BlockTime uint64 `json:"block_time"` + // which will be abandoned after the Volta fork. + BlockTime uint64 `json:"block_time"` // Sequencer batches may not be more than MaxSequencerDrift seconds after // the L1 timestamp of the sequencing window end. 
// @@ -169,19 +168,20 @@ type Config struct { LegacyUsePlasma bool `json:"use_plasma,omitempty"` } -//const millisecondBlockIntervalVolta = 500 +const MillisecondBlockIntervalVolta = 500 func (cfg *Config) MillisecondBlockInterval(millisecondTimestamp uint64) uint64 { if cfg.IsVolta(millisecondTimestamp / 1000) { - return VoltBlockTime + return MillisecondBlockIntervalVolta } - return BeforeVoltBlockTime + return cfg.BlockTime * 1000 } func (cfg *Config) SecondBlockInterval(millisecondTimestamp uint64) uint64 { return cfg.MillisecondBlockInterval(millisecondTimestamp) / 1000 } +// TODO: func (cfg *Config) NextMillisecondBlockTime(millisecondTimestamp uint64) uint64 { return millisecondTimestamp + cfg.MillisecondBlockInterval(millisecondTimestamp) } @@ -194,11 +194,11 @@ func (c *Config) IsVolta(timestamp uint64) bool { return c.VoltaTime != nil && timestamp >= *c.VoltaTime } -func (c *Config) VoltaBlocNumber() uint64 { +func (c *Config) VoltaBlockNumber() uint64 { if c.VoltaTime == nil || *c.VoltaTime == 0 { return 0 } - return (*c.VoltaTime-c.Genesis.L2Time)/(BeforeVoltBlockTime/1000) + c.Genesis.L2.Number + return (*c.VoltaTime-c.Genesis.L2Time)/c.BlockTime + c.Genesis.L2.Number } func (c *Config) IsVoltaActivationBlock(l2BlockMillisecondTime uint64) bool { @@ -207,8 +207,8 @@ func (c *Config) IsVoltaActivationBlock(l2BlockMillisecondTime uint64) bool { } l2BlockTime := l2BlockMillisecondTime / 1000 return c.IsVolta(l2BlockTime) && - l2BlockTime >= BeforeVoltBlockTime/1000 && - !c.IsVolta(l2BlockTime-BeforeVoltBlockTime/1000) + l2BlockTime >= c.BlockTime && + !c.IsVolta(l2BlockTime-c.BlockTime) } // MillisecondBlockInterval returns millisecond block interval, which has compatible conversions. 
@@ -263,16 +263,16 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client, skipL2 } func (cfg *Config) MillisecondTimestampForBlock(blockNumber uint64) uint64 { - voltaBlockNumber := cfg.VoltaBlocNumber() + voltaBlockNumber := cfg.VoltaBlockNumber() if voltaBlockNumber == 0 || blockNumber <= voltaBlockNumber { - return cfg.Genesis.L2Time*1000 + ((blockNumber - cfg.Genesis.L2.Number) * BeforeVoltBlockTime) + return cfg.Genesis.L2Time*1000 + (blockNumber-cfg.Genesis.L2.Number)*cfg.BlockTime*1000 } else { - return voltaBlockNumber + *cfg.VoltaTime*1000 + (blockNumber-voltaBlockNumber)*VoltBlockTime + return *cfg.VoltaTime*1000 + (blockNumber-voltaBlockNumber)*MillisecondBlockIntervalVolta } } func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err error) { - voltaBlockNumber := cfg.VoltaBlocNumber() + voltaBlockNumber := cfg.VoltaBlockNumber() if voltaBlockNumber == 0 || milliTimestamp <= *cfg.VoltaTime*1000 { // subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that // difference by the block time to get the expected L2 block number at the current time. If the @@ -283,12 +283,12 @@ func (cfg *Config) TargetBlockNumber(milliTimestamp uint64) (num uint64, err err } wallClockGenesisDiff := milliTimestamp - genesisMilliTimestamp // Note: round down, we should not request blocks into the future. 
- blocksSinceGenesis := wallClockGenesisDiff / BeforeVoltBlockTime + blocksSinceGenesis := wallClockGenesisDiff / (cfg.BlockTime * 1000) return cfg.Genesis.L2.Number + blocksSinceGenesis, nil } else { voltaMilliTimestamp := *cfg.VoltaTime * 1000 wallClockGenesisDiff := milliTimestamp - voltaMilliTimestamp - blocksSinceVolta := wallClockGenesisDiff / VoltBlockTime + blocksSinceVolta := wallClockGenesisDiff / MillisecondBlockIntervalVolta return voltaBlockNumber + blocksSinceVolta, nil } } @@ -353,9 +353,9 @@ func (cfg *Config) CheckL2GenesisBlockHash(ctx context.Context, client L2Client) // Check verifies that the given configuration makes sense func (cfg *Config) Check() error { - //if cfg.BlockTime == 0 { - // return ErrBlockTimeZero - //} + if cfg.BlockTime == 0 { + return ErrBlockTimeZero + } if cfg.ChannelTimeout == 0 { return ErrMissingChannelTimeout } diff --git a/op-node/rollup/types_test.go b/op-node/rollup/types_test.go index d427104a5..002af317b 100644 --- a/op-node/rollup/types_test.go +++ b/op-node/rollup/types_test.go @@ -534,7 +534,7 @@ func TestTimestampForBlock(t *testing.T) { name: "FirstBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2000, + blockTime: 2, blockNum: 0, expectedBlockTime: 100, }, @@ -542,7 +542,7 @@ func TestTimestampForBlock(t *testing.T) { name: "SecondBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2000, + blockTime: 2, blockNum: 1, expectedBlockTime: 102, }, @@ -550,7 +550,7 @@ func TestTimestampForBlock(t *testing.T) { name: "NBlock", genesisTime: 100, genesisBlock: 0, - blockTime: 2000, + blockTime: 2, blockNum: 25, expectedBlockTime: 150, }, @@ -563,7 +563,7 @@ func TestTimestampForBlock(t *testing.T) { config.Genesis.L2.Number = test.genesisBlock config.BlockTime = test.blockTime - timestamp := config.TimestampForBlock(test.blockNum) + timestamp := config.MillisecondTimestampForBlock(test.blockNum) / 1000 assert.Equal(t, timestamp, test.expectedBlockTime) }) } diff --git a/op-node/service.go 
b/op-node/service.go index 0422ef42a..5f10f36dd 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -27,11 +27,11 @@ import ( opflags "github.com/ethereum-optimism/optimism/op-service/flags" ) -const ( - minSecondBlockInterval = 1 - maxSecondBlockInterval = 3 - maxMillisecondBlockInterval = 750 -) +//const ( +// //minSecondBlockInterval = 1 +// //maxSecondBlockInterval = 3 +// //maxMillisecondBlockInterval = 750 +//) // NewConfig creates a Config from the provided flags or environment variables. func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {