diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index a8aee6f774..366cce89fd 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -125,7 +125,7 @@ func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *ty // Check >0 TDs with pre-merge, --0 TDs with post-merge rules if header.Difficulty.Sign() > 0 || // OP-Stack: transitioned networks must use legacy consensus pre-Bedrock - cfg.IsOptimismBedrock(header.Number) { + cfg.IsOptimismPreBedrock(header.Number) { return beacon.ethone.VerifyHeader(chain, header) } return beacon.verifyHeader(chain, header, parent) @@ -410,6 +410,16 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea } } + // Store DA footprint in BlobGasUsed header field if it hasn't already been set yet. + // Builder code may already calculate it during block building to avoid recalculating it here. + if chain.Config().IsDAFootprintBlockLimit(header.Time) && (header.BlobGasUsed == nil || *header.BlobGasUsed == 0) { + daFootprint, err := types.CalcDAFootprint(body.Transactions) + if err != nil { + return nil, fmt.Errorf("error calculating DA footprint: %w", err) + } + header.BlobGasUsed = &daFootprint + } + // Assemble the final block. 
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil), chain.Config()) diff --git a/consensus/misc/eip1559/eip1559.go b/consensus/misc/eip1559/eip1559.go index 373d6f83ef..eb5dfed657 100644 --- a/consensus/misc/eip1559/eip1559.go +++ b/consensus/misc/eip1559/eip1559.go @@ -36,7 +36,7 @@ func VerifyEIP1559Header(config *params.ChainConfig, parent, header *types.Heade if !config.IsLondon(parent.Number) { parentGasLimit = parent.GasLimit * config.ElasticityMultiplier() } - if config.Optimism == nil { // gasLimit can adjust instantly in optimism + if !config.IsOptimism() { // OP Stack gasLimit can adjust instantly if err := misc.VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil { return err } @@ -75,7 +75,7 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header, time uint64) } // OPStack addition: calculate the base fee using the upstream code. - baseFee := calcBaseFeeInner(parent, elasticity, denominator) + baseFee := calcBaseFeeInner(config, parent, elasticity, denominator) // OPStack addition: enforce minimum base fee. // If the minimum base fee is 0, this has no effect. @@ -89,10 +89,20 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header, time uint64) return baseFee } -func calcBaseFeeInner(parent *types.Header, elasticity uint64, denominator uint64) *big.Int { +func calcBaseFeeInner(config *params.ChainConfig, parent *types.Header, elasticity uint64, denominator uint64) *big.Int { parentGasTarget := parent.GasLimit / elasticity - // If the parent gasUsed is the same as the target, the baseFee remains unchanged. 
- if parent.GasUsed == parentGasTarget { + parentGasMetered := parent.GasUsed + if config.IsDAFootprintBlockLimit(parent.Time) { + if parent.BlobGasUsed == nil { + panic("Jovian parent block has nil BlobGasUsed") + } else if *parent.BlobGasUsed > parent.GasUsed { + // Jovian updates the base fee based on the maximum of total transactions gas used and total DA footprint (which is + // stored in the BlobGasUsed field of the header). + parentGasMetered = *parent.BlobGasUsed + } + } + // If the parent gasMetered is the same as the target, the baseFee remains unchanged. + if parentGasMetered == parentGasTarget { return new(big.Int).Set(parent.BaseFee) } @@ -101,10 +111,10 @@ func calcBaseFeeInner(parent *types.Header, elasticity uint64, denominator uint6 denom = new(big.Int) ) - if parent.GasUsed > parentGasTarget { + if parentGasMetered > parentGasTarget { // If the parent block used more gas than its target, the baseFee should increase. // max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) - num.SetUint64(parent.GasUsed - parentGasTarget) + num.SetUint64(parentGasMetered - parentGasTarget) num.Mul(num, parent.BaseFee) num.Div(num, denom.SetUint64(parentGasTarget)) num.Div(num, denom.SetUint64(denominator)) @@ -115,7 +125,7 @@ func calcBaseFeeInner(parent *types.Header, elasticity uint64, denominator uint6 } else { // Otherwise if the parent block used less gas than its target, the baseFee should decrease. 
// max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) - num.SetUint64(parentGasTarget - parent.GasUsed) + num.SetUint64(parentGasTarget - parentGasMetered) num.Mul(num, parent.BaseFee) num.Div(num, denom.SetUint64(parentGasTarget)) num.Div(num, denom.SetUint64(denominator)) diff --git a/consensus/misc/eip1559/eip1559_test.go b/consensus/misc/eip1559/eip1559_test.go index b8139f963b..9e86341099 100644 --- a/consensus/misc/eip1559/eip1559_test.go +++ b/consensus/misc/eip1559/eip1559_test.go @@ -57,17 +57,19 @@ func config() *params.ChainConfig { return config } -var TestCanyonTime = uint64(10) -var TestHoloceneTime = uint64(12) -var TestJovianTime = uint64(14) +var ( + testCanyonTime = uint64(10) + testHoloceneTime = uint64(12) + testJovianTime = uint64(14) +) func opConfig() *params.ChainConfig { config := copyConfig(params.TestChainConfig) config.LondonBlock = big.NewInt(5) eip1559DenominatorCanyon := uint64(250) - config.CanyonTime = &TestCanyonTime - config.HoloceneTime = &TestHoloceneTime - config.JovianTime = &TestJovianTime + config.CanyonTime = &testCanyonTime + config.HoloceneTime = &testHoloceneTime + config.JovianTime = &testJovianTime config.Optimism = &params.OptimismConfig{ EIP1559Elasticity: 6, EIP1559Denominator: 50, @@ -227,59 +229,74 @@ func TestCalcBaseFeeOptimismHolocene(t *testing.T) { // TestCalcBaseFeeJovian tests that the minimum base fee is enforced // when the computed base fee is less than the minimum base fee, // if the feature is active and not enforced otherwise. +// It also tests that the base fee update will take the DA footprint as stored +// in the blob gas used field into account if it is larger than the gas used +// field. 
func TestCalcBaseFeeJovian(t *testing.T) { parentGasLimit := uint64(30_000_000) denom := uint64(50) elasticity := uint64(3) + parentGasTarget := parentGasLimit / elasticity + const zeroParentBlobGasUsed = 0 - preJovian := TestJovianTime - 1 - postJovian := TestJovianTime + preJovian := testJovianTime - 1 + postJovian := testJovianTime tests := []struct { - parentBaseFee int64 - parentGasUsed uint64 - parentTime uint64 - minBaseFee uint64 - expectedBaseFee uint64 + parentBaseFee int64 + parentGasUsed uint64 + parentBlobGasUsed uint64 + parentTime uint64 + minBaseFee uint64 + expectedBaseFee uint64 }{ // Test 0: gas used is below target, and the new calculated base fee is very low. // But since we are pre Jovian, we don't enforce the minBaseFee. - {1, parentGasLimit/elasticity - 1_000_000, preJovian, 1e9, 1}, + {1, parentGasTarget - 1_000_000, zeroParentBlobGasUsed, preJovian, 1e9, 1}, // Test 1: gas used is exactly the target gas, but the base fee is set too low so // the base fee is expected to be the minBaseFee - {1, parentGasLimit / elasticity, postJovian, 1e9, 1e9}, + {1, parentGasTarget, zeroParentBlobGasUsed, postJovian, 1e9, 1e9}, // Test 2: gas used exceeds gas target, but the new calculated base fee is still // too low so the base fee is expected to be the minBaseFee - {1, parentGasLimit/elasticity + 1_000_000, postJovian, 1e9, 1e9}, + {1, parentGasTarget + 1_000_000, zeroParentBlobGasUsed, postJovian, 1e9, 1e9}, // Test 3: gas used exceeds gas target, but the new calculated base fee is higher // than the minBaseFee, so don't enforce minBaseFee. 
See the calculation below: // gasUsedDelta = gasUsed - parentGasTarget = 20_000_000 - 30_000_000 / 3 = 10_000_000 // 2e9 * 10_000_000 / 10_000_000 / 50 = 40_000_000 // 2e9 + 40_000_000 = 2_040_000_000, which is greater than minBaseFee - {2e9, parentGasLimit/elasticity + 10_000_000, postJovian, 1e9, 2_040_000_000}, + {2e9, parentGasTarget + 10_000_000, zeroParentBlobGasUsed, postJovian, 1e9, 2_040_000_000}, // Test 4: gas used is below target, but the new calculated base fee is still // too low so the base fee is expected to be the minBaseFee - {1, parentGasLimit/elasticity - 1_000_000, postJovian, 1e9, 1e9}, + {1, parentGasTarget - 1_000_000, zeroParentBlobGasUsed, postJovian, 1e9, 1e9}, // Test 5: gas used is below target, and the new calculated base fee is higher // than the minBaseFee, so don't enforce minBaseFee. See the calculation below: // gasUsedDelta = gasUsed - parentGasTarget = 9_000_000 - 30_000_000 / 3 = -1_000_000 // 2_097_152 * -1_000_000 / 10_000_000 / 50 = -4194.304 // 2_097_152 - 4194.304 = 2_092_957.696, which is greater than minBaseFee - {2_097_152, parentGasLimit/elasticity - 1_000_000, postJovian, 2e6, 2_092_958}, + {2_097_152, parentGasTarget - 1_000_000, zeroParentBlobGasUsed, postJovian, 2e6, 2_092_958}, // Test 6: parent base fee already at minimum, below target => no change - {1e4, parentGasLimit/elasticity - 1, postJovian, 1e4, 1e4}, + {1e4, parentGasTarget - 1, zeroParentBlobGasUsed, postJovian, 1e4, 1e4}, // Test 7: parent base fee already at minimum, above target => small increase as usual - {1e4, parentGasLimit/elasticity + 1, postJovian, 1e4, 1e4 + 1}, + {1e4, parentGasTarget + 1, zeroParentBlobGasUsed, postJovian, 1e4, 1e4 + 1}, + + // Test 8: Pre-Jovian: parent base fee already at minimum, gas used at target, blob gas used at limit + // => no increase, minBaseFee ignored, high blob gas used ignored + {1e4, parentGasTarget, parentGasLimit, preJovian, 1e6, 1e4}, + // Test 9: parent base fee already at minimum, gas used at target, da 
footprint above target => small increase + {1e4, parentGasTarget, parentGasTarget + 1, postJovian, 1e4, 1e4 + 1}, + // Test 10: Test 3, but with high blob gas used instead of gas used + {2e9, parentGasTarget, parentGasTarget + 10_000_000, postJovian, 1e9, 2_040_000_000}, } for i, test := range tests { testName := fmt.Sprintf("test %d", i) t.Run(testName, func(t *testing.T) { parent := &types.Header{ - Number: common.Big32, - GasLimit: parentGasLimit, - GasUsed: test.parentGasUsed, - BaseFee: big.NewInt(test.parentBaseFee), - Time: test.parentTime, + Number: common.Big32, + GasLimit: parentGasLimit, + GasUsed: test.parentGasUsed, + BlobGasUsed: &test.parentBlobGasUsed, + BaseFee: big.NewInt(test.parentBaseFee), + Time: test.parentTime, } parent.Extra = EncodeOptimismExtraData(opConfig(), test.parentTime, denom, elasticity, &test.minBaseFee) have, want := CalcBaseFee(opConfig(), parent, parent.Time+2), big.NewInt(int64(test.expectedBaseFee)) diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go index 875eec7a91..0e82afa154 100644 --- a/consensus/misc/eip4844/eip4844.go +++ b/consensus/misc/eip4844/eip4844.go @@ -110,12 +110,16 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade return errors.New("header is missing blobGasUsed") } - // Verify that the blob gas used remains within reasonable limits. - if !config.IsOptimism() && *header.BlobGasUsed > bcfg.maxBlobGas() { - return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, bcfg.maxBlobGas()) - } - if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 { - return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob) + // OP Stack sets a zero blobGasUsed pre-Jovian. Post-Jovian, it stores the DA footprint, which is + // probably not a multiple of [params.BlobTxBlobGasPerBlob]. 
+ if !config.IsOptimism() { + // Verify that the blob gas used remains within reasonable limits. + if *header.BlobGasUsed > bcfg.maxBlobGas() { + return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, bcfg.maxBlobGas()) + } + if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 { + return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob) + } } // Verify the excessBlobGas is correct based on the parent header diff --git a/core/block_validator.go b/core/block_validator.go index 33987fa369..a6a598fc71 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -106,7 +106,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { } // Check blob gas usage. - if header.BlobGasUsed != nil { + if (!v.config.IsOptimism() || v.config.IsL2Blob(header.Number, header.Time)) && header.BlobGasUsed != nil { if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated return fmt.Errorf("blob gas used mismatch (header %v, calculated %v)", *header.BlobGasUsed, blobs*params.BlobTxBlobGasPerBlob) } @@ -116,6 +116,23 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { } } + // OP Stack Jovian DA footprint block limit. 
+ if v.config.IsDAFootprintBlockLimit(header.Time) { + if header.BlobGasUsed == nil { + return errors.New("nil blob gas used in post-Jovian block header, should store DA footprint") + } + blobGasUsed := *header.BlobGasUsed + daFootprint, err := types.CalcDAFootprint(block.Transactions()) + if err != nil { + return fmt.Errorf("failed to calculate DA footprint: %w", err) + } else if blobGasUsed != daFootprint { + return fmt.Errorf("invalid DA footprint in blobGasUsed field (remote: %d local: %d)", blobGasUsed, daFootprint) + } + if daFootprint > block.GasLimit() { + return fmt.Errorf("DA footprint %d exceeds block gas limit %d", daFootprint, block.GasLimit()) + } + } + // Ancestor block must be known. if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { diff --git a/core/blockchain.go b/core/blockchain.go index 8f4eb804ee..365b864dc0 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -64,7 +64,6 @@ var ( headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil) headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil) headSafeBlockGauge = metrics.NewRegisteredGauge("chain/head/safe", nil) - headBaseFeeGauge = metrics.NewRegisteredGauge("chain/head/basefee", nil) chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil) chainMgaspsMeter = metrics.NewRegisteredResettingTimer("chain/mgasps", nil) @@ -1230,7 +1229,9 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) { bc.currentBlock.Store(block.Header()) headBlockGauge.Update(int64(block.NumberU64())) - headBaseFeeGauge.TryUpdate(block.Header().BaseFee) + + // OPStack addition + updateOptimismBlockMetrics(block.Header()) } // stopWithoutSaving stops the blockchain service. 
If any imports are currently in progress @@ -1398,7 +1399,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ bc.currentSnapBlock.Store(header) headHeaderGauge.Update(header.Number.Int64()) headFastBlockGauge.Update(header.Number.Int64()) - headBaseFeeGauge.TryUpdate(header.BaseFee) + + // OPStack addition + updateOptimismBlockMetrics(header) return nil } // writeAncient writes blockchain and corresponding receipt chain into ancient store. @@ -2771,7 +2774,9 @@ func (bc *BlockChain) InsertHeadersBeforeCutoff(headers []*types.Header) (int, e bc.currentSnapBlock.Store(last) headHeaderGauge.Update(last.Number.Int64()) headFastBlockGauge.Update(last.Number.Int64()) - headBaseFeeGauge.TryUpdate(last.BaseFee) + + // OPStack addition + updateOptimismBlockMetrics(last) return 0, nil } diff --git a/core/blockchain_optimism.go b/core/blockchain_optimism.go new file mode 100644 index 0000000000..fa7ee47390 --- /dev/null +++ b/core/blockchain_optimism.go @@ -0,0 +1,27 @@ +package core + +import ( + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/metrics" +) + +// OPStack additions +var ( + headBaseFeeGauge = metrics.NewRegisteredGauge("chain/head/basefee", nil) + headGasUsedGauge = metrics.NewRegisteredGauge("chain/head/gas_used", nil) + headBlobGasUsedGauge = metrics.NewRegisteredGauge("chain/head/blob_gas_used", nil) + + headGasUsedHist = metrics.NewRegisteredHistogram("chain/head/gas_used_hist", nil, metrics.NewExpDecaySample(1028, 0.015)) + headBlobGasUsedHist = metrics.NewRegisteredHistogram("chain/head/blob_gas_used_hist", nil, metrics.NewExpDecaySample(1028, 0.015)) +) + +func updateOptimismBlockMetrics(header *types.Header) error { + headBaseFeeGauge.TryUpdate(header.BaseFee) + headGasUsedGauge.Update(int64(header.GasUsed)) + headBlobGasUsedGauge.TryUpdateUint64(header.BlobGasUsed) + headGasUsedHist.Update(int64(header.GasUsed)) + if header.BlobGasUsed != nil { + 
headBlobGasUsedHist.Update(int64(*header.BlobGasUsed)) + } + return nil +} diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index dc6e6fe817..c78ff23cd6 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -76,10 +76,16 @@ func TestCreation(t *testing.T) { {20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block {30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block - {40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block + {30000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block {30000000, 1746022486, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // Last Cancun block - {30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // First Prague block - {50000000, 2000000000, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // Future Prague block + {30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 1764798551}}, // First Prague block + {30000000, 1764798550, ID{Hash: checksumToBytes(0xc376cf8b), Next: 1764798551}}, // Last Prague block + {30000000, 1764798551, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // First Osaka block + {30000000, 1765290070, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // Last Osaka block + {30000000, 1765290071, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // First BPO1 block + {30000000, 1767747670, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // Last BPO1 block + {30000000, 1767747671, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // First BPO2 block + {50000000, 2000000000, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // Future BPO2 block }, }, // Sepolia test cases @@ -162,6 +168,9 @@ func 
TestValidation(t *testing.T) { legacyConfig.ShanghaiTime = nil legacyConfig.CancunTime = nil legacyConfig.PragueTime = nil + legacyConfig.OsakaTime = nil + legacyConfig.BPO1Time = nil + legacyConfig.BPO2Time = nil tests := []struct { config *params.ChainConfig @@ -361,11 +370,11 @@ func TestValidation(t *testing.T) { // Local is mainnet Shanghai, remote is random Shanghai. {params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale}, - // Local is mainnet Prague, far in the future. Remote announces Gopherium (non existing fork) + // Local is mainnet BPO2, far in the future. Remote announces Gopherium (non existing fork) // at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible. // // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess). - {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xc376cf8b), Next: 8888888888}, ErrLocalIncompatibleOrStale}, + {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x07c9462e), Next: 8888888888}, ErrLocalIncompatibleOrStale}, // Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing // fork) at timestamp 1668000000, before Cancun. Local is incompatible. 
diff --git a/core/headerchain.go b/core/headerchain.go index 4174aadef1..ca403535e1 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -92,7 +92,9 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c } hc.currentHeaderHash = hc.CurrentHeader().Hash() headHeaderGauge.Update(hc.CurrentHeader().Number.Int64()) - headBaseFeeGauge.TryUpdate(hc.CurrentHeader().BaseFee) + + // OPStack addition + updateOptimismBlockMetrics(hc.CurrentHeader()) return hc, nil } @@ -183,7 +185,9 @@ func (hc *HeaderChain) Reorg(headers []*types.Header) error { hc.currentHeaderHash = last.Hash() hc.currentHeader.Store(types.CopyHeader(last)) headHeaderGauge.Update(last.Number.Int64()) - headBaseFeeGauge.TryUpdate(last.BaseFee) + + // OPStack addition + updateOptimismBlockMetrics(last) return nil } @@ -486,7 +490,9 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { hc.currentHeader.Store(head) hc.currentHeaderHash = head.Hash() headHeaderGauge.Update(head.Number.Int64()) - headBaseFeeGauge.TryUpdate(head.BaseFee) + + // OPStack addition + updateOptimismBlockMetrics(head) } type ( @@ -573,7 +579,9 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat hc.currentHeader.Store(parent) hc.currentHeaderHash = parentHash headHeaderGauge.Update(parent.Number.Int64()) - headBaseFeeGauge.TryUpdate(parent.BaseFee) + + // OPStack addition + updateOptimismBlockMetrics(parent) // If this is the first iteration, wipe any leftover data upwards too so // we don't end up with dangling daps in the database diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 598f250de6..197e8985ef 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -55,6 +55,12 @@ const ( // tiny overflows causing all txs to move a shelf higher, wasting disk space. txAvgSize = 4 * 1024 + // txBlobOverhead is an approximation of the overhead that an additional blob + // has on transaction size. 
This is added to the slotter to avoid tiny + // overflows causing all txs to move a shelf higher, wasting disk space. A + // small buffer is added to the proof overhead. + txBlobOverhead = uint32(kzg4844.CellProofsPerBlob*len(kzg4844.Proof{}) + 64) + // txMaxSize is the maximum size a single transaction can have, outside // the included blobs. Since blob transactions are pulled instead of pushed, // and only a small metadata is kept in ram, the rest is on disk, there is @@ -83,6 +89,10 @@ const ( // limboedTransactionStore is the subfolder containing the currently included // but not yet finalized transaction blobs. limboedTransactionStore = "limbo" + + // storeVersion is the current slotter layout used for the billy.Database + // store. + storeVersion = 1 ) // blobTxMeta is the minimal subset of types.BlobTx necessary to validate and @@ -389,6 +399,14 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser } p.head, p.state = head, state + // Create new slotter for pre-Osaka blob configuration. 
+ slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) + + // See if we need to migrate the queue blob store after fusaka + slotter, err = tryMigrate(p.chain.Config(), slotter, queuedir) + if err != nil { + return err + } // Index all transactions on disk and delete anything unprocessable var fails []uint64 index := func(id uint64, size uint32, blob []byte) { @@ -396,7 +414,6 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser fails = append(fails, id) } } - slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, slotter, index) if err != nil { return err @@ -430,7 +447,7 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser // Pool initialized, attach the blob limbo to it to track blobs included // recently but not yet finalized - p.limbo, err = newLimbo(limbodir, eip4844.LatestMaxBlobsPerBlock(p.chain.Config())) + p.limbo, err = newLimbo(p.chain.Config(), limbodir) if err != nil { p.Close() return err diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go index aaa7aed3e3..551c854d9b 100644 --- a/core/txpool/blobpool/blobpool_test.go +++ b/core/txpool/blobpool/blobpool_test.go @@ -984,7 +984,7 @@ func TestOpenCap(t *testing.T) { storage := t.TempDir() os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotterEIP7594(testMaxBlobsPerBlock), nil) // Insert a few transactions from a few accounts var ( @@ -1006,7 +1006,7 @@ func TestOpenCap(t *testing.T) { keep = []common.Address{addr1, addr3} drop = []common.Address{addr2} - size = uint64(2 * (txAvgSize + blobSize)) + size = 2 * (txAvgSize + blobSize + 
uint64(txBlobOverhead)) ) store.Put(blob1) store.Put(blob2) @@ -1015,7 +1015,7 @@ func TestOpenCap(t *testing.T) { // Verify pool capping twice: first by reducing the data cap, then restarting // with a high cap to ensure everything was persisted previously - for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} { + for _, datacap := range []uint64{2 * (txAvgSize + blobSize + uint64(txBlobOverhead)), 1000 * (txAvgSize + blobSize + uint64(txBlobOverhead))} { // Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) @@ -1163,6 +1163,115 @@ func TestChangingSlotterSize(t *testing.T) { } } +// TestBillyMigration tests the billy migration from the default slotter to +// the PeerDAS slotter. This tests both the migration of the slotter +// as well as increasing the slotter size of the new slotter. +func TestBillyMigration(t *testing.T) { + //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) + + // Create a temporary folder for the persistent backend + storage := t.TempDir() + + os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) + os.MkdirAll(filepath.Join(storage, limboedTransactionStore), 0700) + // Create the billy with the old slotter + oldSlotter := newSlotterEIP7594(6) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, oldSlotter, nil) + + // Create transactions from a few accounts. 
+ var ( + key1, _ = crypto.GenerateKey() + key2, _ = crypto.GenerateKey() + key3, _ = crypto.GenerateKey() + + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + addr3 = crypto.PubkeyToAddress(key3.PublicKey) + + tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0) + tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0) + tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0) + + blob1, _ = rlp.EncodeToBytes(tx1) + blob2, _ = rlp.EncodeToBytes(tx2) + ) + + // Write the two safely sized txs to store. note: although the store is + // configured for a blob count of 6, it can also support around ~1mb of call + // data - all this to say that we aren't using the absolute largest shelf + // available. + store.Put(blob1) + store.Put(blob2) + store.Close() + + // Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24. + for _, maxBlobs := range []int{6, 24} { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) + statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) + statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) + statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) + statedb.Commit(0, true, false) + + // Make custom chain config where the max blob count changes based on the loop variable. 
+ zero := uint64(0) + config := &params.ChainConfig{ + ChainID: big.NewInt(1), + LondonBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + CancunTime: &zero, + OsakaTime: &zero, + BlobScheduleConfig: &params.BlobScheduleConfig{ + Cancun: &params.BlobConfig{ + Target: maxBlobs / 2, + Max: maxBlobs, + UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction, + }, + Osaka: &params.BlobConfig{ + Target: maxBlobs / 2, + Max: maxBlobs, + UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction, + }, + }, + } + chain := &testBlockChain{ + config: config, + basefee: uint256.NewInt(1050), + blobfee: uint256.NewInt(105), + statedb: statedb, + } + pool := New(Config{Datadir: storage}, chain, nil) + if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil { + t.Fatalf("failed to create blob pool: %v", err) + } + + // Try to add the big blob tx. In the initial iteration it should overflow + // the pool. On the subsequent iteration it should be accepted. + errs := pool.Add([]*types.Transaction{tx3}, true) + if _, ok := pool.index[addr3]; ok && maxBlobs == 6 { + t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0]) + } else if !ok && maxBlobs == 24 { + t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0]) + } + + // Verify the regular two txs are always available. + if got := pool.Get(tx1.Hash()); got == nil { + t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1) + } + if got := pool.Get(tx2.Hash()); got == nil { + t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2) + } + + // Verify all the calculated pool internals. Interestingly, this is **not** + // a duplication of the above checks, this actually validates the verifier + // using the above already hard coded checks. + // + // Do not remove this, nor alter the above to be generic. 
+ verifyPoolInternals(t, pool) + + pool.Close() + } +} + // TestBlobCountLimit tests the blobpool enforced limits on the max blob count. func TestBlobCountLimit(t *testing.T) { var ( diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go index 99d1b4ad6b..50c40c9d83 100644 --- a/core/txpool/blobpool/limbo.go +++ b/core/txpool/blobpool/limbo.go @@ -20,8 +20,10 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/billy" ) @@ -48,11 +50,21 @@ type limbo struct { } // newLimbo opens and indexes a set of limboed blob transactions. -func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) { +func newLimbo(config *params.ChainConfig, datadir string) (*limbo, error) { l := &limbo{ index: make(map[common.Hash]uint64), groups: make(map[uint64]map[uint64]common.Hash), } + + // Create new slotter for pre-Osaka blob configuration. + slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(config)) + + // See if we need to migrate the limbo after fusaka. 
+ slotter, err := tryMigrate(config, slotter, datadir) + if err != nil { + return nil, err + } + // Index all limboed blobs on disk and delete anything unprocessable var fails []uint64 index := func(id uint64, size uint32, data []byte) { @@ -60,7 +72,7 @@ func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) { fails = append(fails, id) } } - store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(maxBlobsPerTransaction), index) + store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, slotter, index) if err != nil { return nil, err } diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go index 84ccc0f27b..9b793e366c 100644 --- a/core/txpool/blobpool/slotter.go +++ b/core/txpool/blobpool/slotter.go @@ -16,6 +16,49 @@ package blobpool +import ( + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/billy" +) + +// tryMigrate checks if the billy needs to be migrated and migrates if needed. +// Returns a slotter that can be used for the database. +func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir string) (billy.SlotSizeFn, error) { + // Check if we need to migrate our blob db to the new slotter. + if config.OsakaTime != nil { + // Open the store using the version slotter to see if any version has been + // written. + var version int + index := func(_ uint64, _ uint32, blob []byte) { + version = max(version, parseSlotterVersion(blob)) + } + store, err := billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), index) + if err != nil { + return nil, err + } + store.Close() + + // If the version found is less than the currently configured store version, + // perform a migration then write the updated version of the store. 
+ if version < storeVersion { + newSlotter := newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config)) + if err := billy.Migrate(billy.Options{Path: datadir, Repair: true}, slotter, newSlotter); err != nil { + return nil, err + } + store, err = billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), nil) + if err != nil { + return nil, err + } + writeSlotterVersion(store, storeVersion) + store.Close() + } + // Set the slotter to the format now that Osaka is active. + slotter = newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config)) + } + return slotter, nil +} + // newSlotter creates a helper method for the Billy datastore that returns the // individual shelf sizes used to store transactions in. // @@ -25,7 +68,7 @@ package blobpool // The slotter also creates a shelf for 0-blob transactions. Whilst those are not // allowed in the current protocol, having an empty shelf is not a relevant use // of resources, but it makes stress testing with junk transactions simpler. -func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) { +func newSlotter(maxBlobsPerTransaction int) billy.SlotSizeFn { slotsize := uint32(txAvgSize) slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return @@ -36,3 +79,42 @@ func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) { return slotsize, finished } } + +// newSlotterEIP7594 creates a different slotter for EIP-7594 transactions. +// EIP-7594 (PeerDAS) changes the average transaction size which means the current +// static 4KB average size is not enough anymore. +// This slotter adds a dynamic overhead component to the slotter, which also +// captures the notion that blob transactions with more blobs are also more likely +// to have more calldata. 
+func newSlotterEIP7594(maxBlobsPerTransaction int) billy.SlotSizeFn { + slotsize := uint32(txAvgSize) + slotsize -= uint32(blobSize) + txBlobOverhead // underflows, it's ok, will overflow back in the first return + + return func() (size uint32, done bool) { + slotsize += blobSize + txBlobOverhead + finished := slotsize > uint32(maxBlobsPerTransaction)*(blobSize+txBlobOverhead)+txMaxSize + + return slotsize, finished + } +} + +// newVersionSlotter creates a slotter with a single 8 byte shelf to store +// version metadata in. +func newVersionSlotter() billy.SlotSizeFn { + return func() (size uint32, done bool) { + return 8, true + } +} + +// parseSlotterVersion will parse the slotter's version from a given data blob. +func parseSlotterVersion(blob []byte) int { + if len(blob) > 0 { + return int(blob[0]) + } + return 0 +} + +// writeSlotterVersion writes the current slotter version into the store. +func writeSlotterVersion(store billy.Database, version int) { + store.Put([]byte{byte(version)}) +} diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go index 8d46f47d2c..e4cf232f4e 100644 --- a/core/txpool/blobpool/slotter_test.go +++ b/core/txpool/blobpool/slotter_test.go @@ -16,7 +16,9 @@ package blobpool -import "testing" +import ( + "testing" +) // Tests that the slotter creates the expected database shelves. func TestNewSlotter(t *testing.T) { @@ -58,3 +60,44 @@ func TestNewSlotter(t *testing.T) { } } } + +// Tests that the slotter creates the expected database shelves. 
+func TestNewSlotterEIP7594(t *testing.T) { + // Generate the database shelve sizes + slotter := newSlotterEIP7594(6) + + var shelves []uint32 + for { + shelf, done := slotter() + shelves = append(shelves, shelf) + if done { + break + } + } + // Compare the database shelves to the expected ones + want := []uint32{ + 0*blobSize + 0*txBlobOverhead + txAvgSize, // 0 blob + some expected tx infos + 1*blobSize + 1*txBlobOverhead + txAvgSize, // 1 blob + some expected tx infos + 2*blobSize + 2*txBlobOverhead + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data) + 3*blobSize + 3*txBlobOverhead + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data) + 4*blobSize + 4*txBlobOverhead + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data) + 5*blobSize + 5*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 6*blobSize + 6*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 7*blobSize + 7*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 8*blobSize + 8*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 9*blobSize + 9*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 10*blobSize + 10*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 11*blobSize + 11*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 12*blobSize + 12*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 13*blobSize + 13*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size + 14*blobSize + 14*txBlobOverhead + txAvgSize, // 1-6 blobs + 
unexpectedly large tx infos >= 4 blobs + max tx metadata size + } + if len(shelves) != len(want) { + t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want)) + } + for i := 0; i < len(shelves) && i < len(want); i++ { + if shelves[i] != want[i] { + t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i]) + } + } +} diff --git a/core/types/block.go b/core/types/block.go index 31d7c6a9d8..1a3f0f1773 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -96,6 +96,7 @@ type Header struct { WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` // BlobGasUsed was added by EIP-4844 and is ignored in legacy headers. + // OP Stack stores the DA footprint in this field starting with the Jovian fork. BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"` // ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers. diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go index def81319aa..b3735d7156 100644 --- a/core/types/gen_receipt_json.go +++ b/core/types/gen_receipt_json.go @@ -42,6 +42,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { L1BlobBaseFeeScalar *hexutil.Uint64 `json:"l1BlobBaseFeeScalar,omitempty"` OperatorFeeScalar *hexutil.Uint64 `json:"operatorFeeScalar,omitempty"` OperatorFeeConstant *hexutil.Uint64 `json:"operatorFeeConstant,omitempty"` + DAFootprintGasScalar *hexutil.Uint64 `json:"daFootprintGasScalar,omitempty"` } var enc Receipt enc.Type = hexutil.Uint64(r.Type) @@ -68,12 +69,9 @@ func (r Receipt) MarshalJSON() ([]byte, error) { enc.FeeScalar = r.FeeScalar enc.L1BaseFeeScalar = (*hexutil.Uint64)(r.L1BaseFeeScalar) enc.L1BlobBaseFeeScalar = (*hexutil.Uint64)(r.L1BlobBaseFeeScalar) - if r.OperatorFeeScalar != nil { - enc.OperatorFeeScalar = (*hexutil.Uint64)(r.OperatorFeeScalar) - } - if r.OperatorFeeConstant != nil { - enc.OperatorFeeConstant = (*hexutil.Uint64)(r.OperatorFeeConstant) - } + enc.OperatorFeeScalar = (*hexutil.Uint64)(r.OperatorFeeScalar) + 
enc.OperatorFeeConstant = (*hexutil.Uint64)(r.OperatorFeeConstant) + enc.DAFootprintGasScalar = (*hexutil.Uint64)(r.DAFootprintGasScalar) return json.Marshal(&enc) } @@ -106,6 +104,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { L1BlobBaseFeeScalar *hexutil.Uint64 `json:"l1BlobBaseFeeScalar,omitempty"` OperatorFeeScalar *hexutil.Uint64 `json:"operatorFeeScalar,omitempty"` OperatorFeeConstant *hexutil.Uint64 `json:"operatorFeeConstant,omitempty"` + DAFootprintGasScalar *hexutil.Uint64 `json:"daFootprintGasScalar,omitempty"` } var dec Receipt if err := json.Unmarshal(input, &dec); err != nil { @@ -194,5 +193,8 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { if dec.OperatorFeeConstant != nil { r.OperatorFeeConstant = (*uint64)(dec.OperatorFeeConstant) } + if dec.DAFootprintGasScalar != nil { + r.DAFootprintGasScalar = (*uint64)(dec.DAFootprintGasScalar) + } return nil } diff --git a/core/types/receipt.go b/core/types/receipt.go index 3f64dd6411..e385df2d3b 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -86,15 +86,16 @@ type Receipt struct { TransactionIndex uint `json:"transactionIndex"` // Optimism: extend receipts with L1 and operator fee info - L1GasPrice *big.Int `json:"l1GasPrice,omitempty"` // Present from pre-bedrock. L1 Basefee after Bedrock - L1BlobBaseFee *big.Int `json:"l1BlobBaseFee,omitempty"` // Always nil prior to the Ecotone hardfork - L1GasUsed *big.Int `json:"l1GasUsed,omitempty"` // Present from pre-bedrock, deprecated as of Fjord - L1Fee *big.Int `json:"l1Fee,omitempty"` // Present from pre-bedrock - FeeScalar *big.Float `json:"l1FeeScalar,omitempty"` // Present from pre-bedrock to Ecotone. 
Nil after Ecotone - L1BaseFeeScalar *uint64 `json:"l1BaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork - L1BlobBaseFeeScalar *uint64 `json:"l1BlobBaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork - OperatorFeeScalar *uint64 `json:"operatorFeeScalar,omitempty"` // Always nil prior to the Isthmus hardfork - OperatorFeeConstant *uint64 `json:"operatorFeeConstant,omitempty"` // Always nil prior to the Isthmus hardfork + L1GasPrice *big.Int `json:"l1GasPrice,omitempty"` // Present from pre-bedrock. L1 Basefee after Bedrock + L1BlobBaseFee *big.Int `json:"l1BlobBaseFee,omitempty"` // Always nil prior to the Ecotone hardfork + L1GasUsed *big.Int `json:"l1GasUsed,omitempty"` // Present from pre-bedrock, deprecated as of Fjord + L1Fee *big.Int `json:"l1Fee,omitempty"` // Present from pre-bedrock + FeeScalar *big.Float `json:"l1FeeScalar,omitempty"` // Present from pre-bedrock to Ecotone. Nil after Ecotone + L1BaseFeeScalar *uint64 `json:"l1BaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork + L1BlobBaseFeeScalar *uint64 `json:"l1BlobBaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork + OperatorFeeScalar *uint64 `json:"operatorFeeScalar,omitempty"` // Always nil prior to the Isthmus hardfork + OperatorFeeConstant *uint64 `json:"operatorFeeConstant,omitempty"` // Always nil prior to the Isthmus hardfork + DAFootprintGasScalar *uint64 `json:"daFootprintGasScalar,omitempty"` // Always nil prior to the Jovian hardfork } type receiptMarshaling struct { @@ -121,6 +122,7 @@ type receiptMarshaling struct { DepositReceiptVersion *hexutil.Uint64 OperatorFeeScalar *hexutil.Uint64 OperatorFeeConstant *hexutil.Uint64 + DAFootprintGasScalar *hexutil.Uint64 } // receiptRLP is the consensus encoding of a receipt. 
@@ -612,26 +614,8 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, blockHash common.Has logIndex += uint(len(rs[i].Logs)) } - if config.Optimism != nil && len(txs) >= 2 && config.IsBedrock(new(big.Int).SetUint64(blockNumber)) { - gasParams, err := extractL1GasParams(config, blockTime, txs[0].Data()) - if err != nil { - return err - } - for i := 0; i < len(rs); i++ { - if txs[i].IsDepositTx() { - continue - } - rs[i].L1GasPrice = gasParams.l1BaseFee - rs[i].L1BlobBaseFee = gasParams.l1BlobBaseFee - rs[i].L1Fee, rs[i].L1GasUsed = gasParams.costFunc(txs[i].RollupCostData()) - rs[i].FeeScalar = gasParams.feeScalar - rs[i].L1BaseFeeScalar = u32ptrTou64ptr(gasParams.l1BaseFeeScalar) - rs[i].L1BlobBaseFeeScalar = u32ptrTou64ptr(gasParams.l1BlobBaseFeeScalar) - if gasParams.operatorFeeScalar != nil && gasParams.operatorFeeConstant != nil && (*gasParams.operatorFeeScalar != 0 || *gasParams.operatorFeeConstant != 0) { - rs[i].OperatorFeeScalar = u32ptrTou64ptr(gasParams.operatorFeeScalar) - rs[i].OperatorFeeConstant = gasParams.operatorFeeConstant - } - } + if config.IsOptimismBedrock(new(big.Int).SetUint64(blockNumber)) && len(txs) >= 2 { + return rs.deriveOPStackFields(config, blockTime, txs) } return nil } diff --git a/core/types/receipt_opstack.go b/core/types/receipt_opstack.go new file mode 100644 index 0000000000..9bc69efca5 --- /dev/null +++ b/core/types/receipt_opstack.go @@ -0,0 +1,54 @@ +package types + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/params" +) + +// deriveOPStackFields derives the OP Stack specific fields for each receipt. +// It must only be called for blocks with at least one transaction (the L1 attributes deposit). +func (rs Receipts) deriveOPStackFields(config *params.ChainConfig, blockTime uint64, txs []*Transaction) error { + // Exit early if there are only deposit transactions, for which no fields are derived. 
+ if txs[len(txs)-1].IsDepositTx() { + return nil + } + + l1AttributesData := txs[0].Data() + gasParams, err := extractL1GasParams(config, blockTime, l1AttributesData) + if err != nil { + return fmt.Errorf("failed to extract L1 gas params: %w", err) + } + + var daFootprintGasScalar uint64 + isJovian := config.IsJovian(blockTime) + if isJovian { + scalar, err := ExtractDAFootprintGasScalar(l1AttributesData) + if err != nil { + return fmt.Errorf("failed to extract DA footprint gas scalar: %w", err) + } + daFootprintGasScalar = uint64(scalar) + } + + for i := range rs { + if txs[i].IsDepositTx() { + continue + } + rs[i].L1GasPrice = gasParams.l1BaseFee + rs[i].L1BlobBaseFee = gasParams.l1BlobBaseFee + rcd := txs[i].RollupCostData() + rs[i].L1Fee, rs[i].L1GasUsed = gasParams.costFunc(rcd) + rs[i].FeeScalar = gasParams.feeScalar + rs[i].L1BaseFeeScalar = u32ptrTou64ptr(gasParams.l1BaseFeeScalar) + rs[i].L1BlobBaseFeeScalar = u32ptrTou64ptr(gasParams.l1BlobBaseFeeScalar) + if gasParams.operatorFeeScalar != nil && gasParams.operatorFeeConstant != nil && (*gasParams.operatorFeeScalar != 0 || *gasParams.operatorFeeConstant != 0) { + rs[i].OperatorFeeScalar = u32ptrTou64ptr(gasParams.operatorFeeScalar) + rs[i].OperatorFeeConstant = gasParams.operatorFeeConstant + } + if isJovian { + rs[i].DAFootprintGasScalar = &daFootprintGasScalar + rs[i].BlobGasUsed = daFootprintGasScalar * rcd.EstimatedDASize().Uint64() + } + } + return nil +} diff --git a/core/types/receipt_opstack_test.go b/core/types/receipt_opstack_test.go new file mode 100644 index 0000000000..c49bc7bbd4 --- /dev/null +++ b/core/types/receipt_opstack_test.go @@ -0,0 +1,500 @@ +package types + +import ( + "bytes" + "encoding/json" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/kylelemons/godebug/diff" + "github.com/stretchr/testify/require" +) + +var ( + bedrockGenesisTestConfig = func() 
*params.ChainConfig { + conf := *params.AllCliqueProtocolChanges // copy the config + conf.Clique = nil + conf.BedrockBlock = big.NewInt(0) + conf.Optimism = ¶ms.OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10} + return &conf + }() + ecotoneTestConfig = func() *params.ChainConfig { + conf := *bedrockGenesisTestConfig // copy the config + time := uint64(0) + conf.EcotoneTime = &time + return &conf + }() + isthmusTestConfig = func() *params.ChainConfig { + conf := *ecotoneTestConfig // copy the config + time := uint64(0) + conf.FjordTime = &time + conf.GraniteTime = &time + conf.HoloceneTime = &time + conf.IsthmusTime = &time + return &conf + }() + jovianTestConfig = func() *params.ChainConfig { + conf := *isthmusTestConfig // copy the config + time := uint64(0) + conf.JovianTime = &time + return &conf + }() + + depositReceiptNoNonce = &Receipt{ + Status: ReceiptStatusFailed, + CumulativeGasUsed: 1, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + { + Address: common.BytesToAddress([]byte{0x01, 0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + }, + Type: DepositTxType, + } + nonce = uint64(1234) + depositReceiptWithNonce = &Receipt{ + Status: ReceiptStatusFailed, + CumulativeGasUsed: 1, + DepositNonce: &nonce, + DepositReceiptVersion: nil, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + { + Address: common.BytesToAddress([]byte{0x01, 0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + }, + Type: DepositTxType, + } + version = CanyonDepositReceiptVersion + depositReceiptWithNonceAndVersion = &Receipt{ + Status: ReceiptStatusFailed, + 
CumulativeGasUsed: 1, + DepositNonce: &nonce, + DepositReceiptVersion: &version, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + { + Address: common.BytesToAddress([]byte{0x01, 0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + }, + Type: DepositTxType, + } + + daFootprintGasScalar = uint16(400) +) + +func clearComputedFieldsOnOPStackReceipts(receipts []*Receipt) []*Receipt { + receipts = clearComputedFieldsOnReceipts(receipts) + for _, receipt := range receipts { + receipt.L1GasPrice = nil + receipt.L1BlobBaseFee = nil + receipt.L1GasUsed = nil + receipt.L1Fee = nil + receipt.FeeScalar = nil + receipt.L1BaseFeeScalar = nil + receipt.L1BlobBaseFeeScalar = nil + receipt.OperatorFeeScalar = nil + receipt.OperatorFeeConstant = nil + receipt.DAFootprintGasScalar = nil + } + return receipts +} + +func getOptimismTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1GasUsed, l1Fee *big.Int, feeScalar *big.Float) ([]*Transaction, []*Receipt) { + // Create a few transactions to have receipts for + txs := Transactions{ + NewTx(&DepositTx{ + To: nil, // contract creation + Value: big.NewInt(6), + Gas: 50, + Data: l1AttributesPayload, + }), + emptyTx, + } + + // Create the corresponding receipts + receipts := Receipts{ + &Receipt{ + Type: DepositTxType, + PostState: common.Hash{5}.Bytes(), + CumulativeGasUsed: 50 + 15, + Logs: []*Log{ + { + Address: common.BytesToAddress([]byte{0x33}), + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 0, + }, + { + Address: common.BytesToAddress([]byte{0x03, 0x33}), + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 1, + }, + }, + TxHash: txs[0].Hash(), + ContractAddress: 
common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"), + GasUsed: 65, + EffectiveGasPrice: big.NewInt(0), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 0, + DepositNonce: &depNonce1, + }, + &Receipt{ + Type: LegacyTxType, + EffectiveGasPrice: big.NewInt(0), + PostState: common.Hash{4}.Bytes(), + CumulativeGasUsed: 10, + Logs: []*Log{}, + // derived fields: + TxHash: txs[1].Hash(), + GasUsed: 18446744073709551561, + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 1, + L1GasPrice: l1GasPrice, + L1GasUsed: l1GasUsed, + L1Fee: l1Fee, + FeeScalar: feeScalar, + }, + } + for _, receipt := range receipts { + receipt.Bloom = CreateBloom(receipt) + } + + return txs, receipts +} + +func getOptimismEcotoneTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar *uint64) ([]*Transaction, []*Receipt) { + txs, receipts := getOptimismTxReceipts(l1AttributesPayload, l1GasPrice, l1GasUsed, l1Fee, nil) + receipts[1].L1BlobBaseFee = l1BlobBaseFee + receipts[1].L1BaseFeeScalar = baseFeeScalar + receipts[1].L1BlobBaseFeeScalar = blobBaseFeeScalar + return txs, receipts +} + +func getOptimismIsthmusTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant *uint64) ([]*Transaction, []*Receipt) { + txs, receipts := getOptimismEcotoneTxReceipts(l1AttributesPayload, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee, baseFeeScalar, blobBaseFeeScalar) + receipts[1].OperatorFeeScalar = operatorFeeScalar + receipts[1].OperatorFeeConstant = operatorFeeConstant + return txs, receipts +} + +func getOptimismJovianTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant, daFootprintGasScalar *uint64) ([]*Transaction, []*Receipt) { + txs, receipts := 
getOptimismIsthmusTxReceipts(l1AttributesPayload, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant) + receipts[1].DAFootprintGasScalar = daFootprintGasScalar + if daFootprintGasScalar != nil { + receipts[1].BlobGasUsed = *daFootprintGasScalar * txs[1].RollupCostData().EstimatedDASize().Uint64() + } + return txs, receipts +} + +func TestDeriveOptimismBedrockTxReceipts(t *testing.T) { + // Bedrock style l1 attributes with L1Scalar=7_000_000 (becomes 7 after division), L1Overhead=50, L1BaseFee=1000*1e6 + payload := common.Hex2Bytes("015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0") + // the parameters we use below are defined in rollup_test.go + l1GasPrice := baseFee + l1GasUsed := bedrockGas + feeScalar := big.NewFloat(float64(scalar.Uint64() / 1e6)) + l1Fee := bedrockFee + txs, receipts := getOptimismTxReceipts(payload, l1GasPrice, l1GasUsed, l1Fee, feeScalar) + + // Re-derive receipts. 
+ baseFee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts) + err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.NoError(t, err) + checkBedrockReceipts(t, receipts, derivedReceipts) + + // Should get same result with the Ecotone config because it will assume this is "first ecotone block" + // if it sees the bedrock style L1 attributes. + err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.NoError(t, err) + checkBedrockReceipts(t, receipts, derivedReceipts) +} + +func TestDeriveOptimismEcotoneTxReceipts(t *testing.T) { + // Ecotone style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6 + payload := common.Hex2Bytes("440a5e20000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2") + // the parameters we use below are defined in rollup_test.go + baseFeeScalarUint64 := baseFeeScalar.Uint64() + blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() + txs, receipts := getOptimismEcotoneTxReceipts(payload, baseFee, blobBaseFee, ecotoneGas, ecotoneFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64) + + // Re-derive receipts. 
+ baseFee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts) + // Should error out if we try to process this with a pre-Ecotone config + err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.Error(t, err) + + err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.NoError(t, err) + diffReceipts(t, receipts, derivedReceipts) +} + +func TestDeriveOptimismIsthmusTxReceipts(t *testing.T) { + // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=1439103868, operatorFeeConstant=1256417826609331460 + payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d255c6fb7c116fb15b44847d04") + // the parameters we use below are defined in rollup_test.go + baseFeeScalarUint64 := baseFeeScalar.Uint64() + blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() + operatorFeeScalarUint64 := operatorFeeScalar.Uint64() + operatorFeeConstantUint64 := operatorFeeConstant.Uint64() + txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, &operatorFeeScalarUint64, &operatorFeeConstantUint64) + + // Re-derive receipts. 
+ baseFee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts) + // Should error out if we try to process this with a pre-Isthmus config + err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.Error(t, err) + + err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.NoError(t, err) + diffReceipts(t, receipts, derivedReceipts) +} + +func TestDeriveOptimismIsthmusTxReceiptsNoOperatorFee(t *testing.T) { + // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=0, operatorFeeConstant=0 + payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000") + // the parameters we use below are defined in rollup_test.go + baseFeeScalarUint64 := baseFeeScalar.Uint64() + blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() + txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, nil, nil) + + // Re-derive receipts. 
+ baseFee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts) + // Should error out if we try to process this with a pre-Isthmus config + err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.Error(t, err) + + err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.NoError(t, err) + diffReceipts(t, receipts, derivedReceipts) +} + +func TestDeriveOptimismJovianTxReceipts(t *testing.T) { + // Jovian style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=1439103868, operatorFeeConstant=1256417826609331460, daFootprintGasScalar=400 + payload := common.Hex2Bytes("3db6be2b000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d255c6fb7c116fb15b44847d040190") + // the parameters we use below are defined in rollup_test.go + baseFeeScalarUint64 := baseFeeScalar.Uint64() + blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() + operatorFeeScalarUint64 := operatorFeeScalar.Uint64() + operatorFeeConstantUint64 := operatorFeeConstant.Uint64() + daFootprintGasScalarUint64 := uint64(daFootprintGasScalar) + txs, receipts := getOptimismJovianTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, &operatorFeeScalarUint64, &operatorFeeConstantUint64, &daFootprintGasScalarUint64) + + // Re-derive receipts. 
+ baseFee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts) + // Should error out if we try to process this with a pre-Jovian config + err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.Error(t, err) + + err = Receipts(derivedReceipts).DeriveFields(jovianTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) + require.NoError(t, err) + diffReceipts(t, receipts, derivedReceipts) +} + +func diffReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) { + // Check diff of receipts against derivedReceipts. + r1, err := json.MarshalIndent(receipts, "", " ") + if err != nil { + t.Fatal("error marshaling input receipts:", err) + } + r2, err := json.MarshalIndent(derivedReceipts, "", " ") + if err != nil { + t.Fatal("error marshaling derived receipts:", err) + } + d := diff.Diff(string(r1), string(r2)) + if d != "" { + t.Fatal("receipts differ:", d) + } +} + +func checkBedrockReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) { + diffReceipts(t, receipts, derivedReceipts) + + // Check that we preserved the invariant: l1Fee = l1GasPrice * l1GasUsed * l1FeeScalar + // but with more difficult int math... 
+ l2Rcpt := derivedReceipts[1] + l1GasCost := new(big.Int).Mul(l2Rcpt.L1GasPrice, l2Rcpt.L1GasUsed) + l1Fee := new(big.Float).Mul(new(big.Float).SetInt(l1GasCost), l2Rcpt.FeeScalar) + require.Equal(t, new(big.Float).SetInt(l2Rcpt.L1Fee), l1Fee) +} + +func TestBedrockDepositReceiptUnchanged(t *testing.T) { + expectedRlp := common.FromHex("7EF90156A003000000000000000000000000000000000000000000000000000000000000000AB9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000F0D7940000000000000000000000000000000000000033C001D7940000000000000000000000000000000000000333C002") + // Deposit receipt with no nonce + receipt := &Receipt{ + Type: DepositTxType, + PostState: common.Hash{3}.Bytes(), + CumulativeGasUsed: 10, + Logs: []*Log{ + {Address: common.BytesToAddress([]byte{0x33}), Data: []byte{1}, Topics: []common.Hash{}}, + {Address: common.BytesToAddress([]byte{0x03, 0x33}), Data: []byte{2}, Topics: []common.Hash{}}, + }, + TxHash: common.Hash{}, + ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), + GasUsed: 4, + } + + rlp, err := receipt.MarshalBinary() + require.NoError(t, err) + require.Equal(t, expectedRlp, rlp) + + // Consensus values should be unchanged after reparsing + parsed := new(Receipt) + err = parsed.UnmarshalBinary(rlp) + require.NoError(t, err) + require.Equal(t, receipt.Status, parsed.Status) + require.Equal(t, receipt.CumulativeGasUsed, parsed.CumulativeGasUsed) + require.Equal(t, receipt.Bloom, parsed.Bloom) + require.EqualValues(t, receipt.Logs, parsed.Logs) + // And still shouldn't 
have a nonce + require.Nil(t, parsed.DepositNonce) + // ..or a deposit nonce + require.Nil(t, parsed.DepositReceiptVersion) +} + +// Regolith introduced an inconsistency in behavior between EncodeIndex and MarshalBinary for a +// deposit transaction receipt. TestReceiptEncodeIndexBugIsEnshrined makes sure this difference is +// preserved for backwards compatibility purposes, but also that there is no discrepancy for the +// post-Canyon encoding. +func TestReceiptEncodeIndexBugIsEnshrined(t *testing.T) { + // Check that a post-Regolith, pre-Canyon receipt produces the expected difference between + // EncodeIndex and MarshalBinary. + buf := new(bytes.Buffer) + receipts := Receipts{depositReceiptWithNonce} + receipts.EncodeIndex(0, buf) + indexBytes := buf.Bytes() + + regularBytes, _ := receipts[0].MarshalBinary() + + require.NotEqual(t, indexBytes, regularBytes) + + // Confirm the buggy encoding is as expected, which means it should encode as if it had no + // nonce specified (like that of a non-deposit receipt, whose encoding would differ only in the + // type byte). + buf.Reset() + tempReceipt := *depositReceiptWithNonce + tempReceipt.Type = eip1559Receipt.Type + buggyBytes, _ := tempReceipt.MarshalBinary() + + require.Equal(t, indexBytes[1:], buggyBytes[1:]) + + // check that the post-Canyon encoding has no differences between EncodeIndex and + // MarshalBinary. 
+ buf.Reset() + receipts = Receipts{depositReceiptWithNonceAndVersion} + receipts.EncodeIndex(0, buf) + indexBytes = buf.Bytes() + + regularBytes, _ = receipts[0].MarshalBinary() + + require.Equal(t, indexBytes, regularBytes) + + // Check that bumping the nonce post-canyon changes the hash + bumpedReceipt := *depositReceiptWithNonceAndVersion + bumpedNonce := nonce + 1 + bumpedReceipt.DepositNonce = &bumpedNonce + bumpedBytes, _ := bumpedReceipt.MarshalBinary() + require.NotEqual(t, regularBytes, bumpedBytes) +} + +func TestRoundTripReceipt(t *testing.T) { + tests := []struct { + name string + rcpt *Receipt + }{ + {name: "Legacy", rcpt: legacyReceipt}, + {name: "AccessList", rcpt: accessListReceipt}, + {name: "EIP1559", rcpt: eip1559Receipt}, + {name: "DepositNoNonce", rcpt: depositReceiptNoNonce}, + {name: "DepositWithNonce", rcpt: depositReceiptWithNonce}, + {name: "DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + data, err := test.rcpt.MarshalBinary() + require.NoError(t, err) + + d := &Receipt{} + err = d.UnmarshalBinary(data) + require.NoError(t, err) + require.Equal(t, test.rcpt, d) + require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce) + require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion) + }) + + t.Run(fmt.Sprintf("%sRejectExtraData", test.name), func(t *testing.T) { + data, err := test.rcpt.MarshalBinary() + require.NoError(t, err) + data = append(data, 1, 2, 3, 4) + d := &Receipt{} + err = d.UnmarshalBinary(data) + require.Error(t, err) + }) + } +} + +func TestRoundTripReceiptForStorage(t *testing.T) { + tests := []struct { + name string + rcpt *Receipt + }{ + {name: "Legacy", rcpt: legacyReceipt}, + {name: "AccessList", rcpt: accessListReceipt}, + {name: "EIP1559", rcpt: eip1559Receipt}, + {name: "DepositNoNonce", rcpt: depositReceiptNoNonce}, + {name: "DepositWithNonce", rcpt: depositReceiptWithNonce}, + {name: 
"DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + data, err := rlp.EncodeToBytes((*ReceiptForStorage)(test.rcpt)) + require.NoError(t, err) + + d := &ReceiptForStorage{} + err = rlp.DecodeBytes(data, d) + require.NoError(t, err) + // Only check the stored fields - the others are derived later + require.Equal(t, test.rcpt.Status, d.Status) + require.Equal(t, test.rcpt.CumulativeGasUsed, d.CumulativeGasUsed) + require.Equal(t, test.rcpt.Logs, d.Logs) + require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce) + require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion) + }) + } +} diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index 85c93dc375..9d513e0039 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -19,7 +19,6 @@ package types import ( "bytes" "encoding/json" - "fmt" "math" "math/big" "reflect" @@ -31,30 +30,9 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" "github.com/kylelemons/godebug/diff" - "github.com/stretchr/testify/require" ) var ( - bedrockGenesisTestConfig = func() *params.ChainConfig { - conf := *params.AllCliqueProtocolChanges // copy the config - conf.Clique = nil - conf.BedrockBlock = big.NewInt(0) - conf.Optimism = ¶ms.OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10} - return &conf - }() - ecotoneTestConfig = func() *params.ChainConfig { - conf := *bedrockGenesisTestConfig // copy the config - time := uint64(0) - conf.EcotoneTime = &time - return &conf - }() - isthmusTestConfig = func() *params.ChainConfig { - conf := *bedrockGenesisTestConfig // copy the config - time := uint64(0) - conf.IsthmusTime = &time - return &conf - }() - legacyReceipt = &Receipt{ Status: ReceiptStatusFailed, CumulativeGasUsed: 1, @@ -105,63 +83,6 @@ var ( }, Type: DynamicFeeTxType, } - depositReceiptNoNonce = &Receipt{ - Status: ReceiptStatusFailed, - 
CumulativeGasUsed: 1, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - { - Address: common.BytesToAddress([]byte{0x01, 0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - }, - Type: DepositTxType, - } - nonce = uint64(1234) - depositReceiptWithNonce = &Receipt{ - Status: ReceiptStatusFailed, - CumulativeGasUsed: 1, - DepositNonce: &nonce, - DepositReceiptVersion: nil, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - { - Address: common.BytesToAddress([]byte{0x01, 0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - }, - Type: DepositTxType, - } - version = CanyonDepositReceiptVersion - depositReceiptWithNonceAndVersion = &Receipt{ - Status: ReceiptStatusFailed, - CumulativeGasUsed: 1, - DepositNonce: &nonce, - DepositReceiptVersion: &version, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - { - Address: common.BytesToAddress([]byte{0x01, 0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - }, - Type: DepositTxType, - } // Create a few transactions to have receipts for to2 = common.HexToAddress("0x2") @@ -240,11 +161,13 @@ var ( Gas: 60, }), } + + blockNumber = big.NewInt(1) + blockTime = uint64(2) + blockHash = common.BytesToHash([]byte{0x03, 0x14}) + depNonce1 = uint64(7) depNonce2 = uint64(8) - blockNumber = big.NewInt(1) - blockTime = uint64(2) - blockHash = common.BytesToHash([]byte{0x03, 0x14}) canyonDepositReceiptVersion = 
CanyonDepositReceiptVersion ) @@ -493,11 +416,11 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) { // Tests that receipt data can be correctly derived from the contextual infos func TestDeriveFields(t *testing.T) { // Re-derive receipts. - baseFee := big.NewInt(1000) + basefee := big.NewInt(1000) blobGasPrice := big.NewInt(920) receipts := getTestReceipts() derivedReceipts := clearComputedFieldsOnReceipts(receipts) - err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, baseFee, blobGasPrice, txs) + err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, blobGasPrice, txs) if err != nil { t.Fatalf("DeriveFields(...) = %v, want ", err) } @@ -728,499 +651,3 @@ func clearComputedFieldsOnLogs(logs []*Log) []*Log { } return l } - -func getOptimismEcotoneTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobGasPrice, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar *uint64) ([]*Transaction, []*Receipt) { - // Create a few transactions to have receipts for - txs := Transactions{ - NewTx(&DepositTx{ - To: nil, // contract creation - Value: big.NewInt(6), - Gas: 50, - Data: l1AttributesPayload, - }), - emptyTx, - } - - // Create the corresponding receipts - receipts := Receipts{ - &Receipt{ - Type: DepositTxType, - PostState: common.Hash{5}.Bytes(), - CumulativeGasUsed: 50 + 15, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x33}), - // derived fields: - BlockNumber: blockNumber.Uint64(), - TxHash: txs[0].Hash(), - TxIndex: 0, - BlockHash: blockHash, - Index: 0, - }, - { - Address: common.BytesToAddress([]byte{0x03, 0x33}), - // derived fields: - BlockNumber: blockNumber.Uint64(), - TxHash: txs[0].Hash(), - TxIndex: 0, - BlockHash: blockHash, - Index: 1, - }, - }, - TxHash: txs[0].Hash(), - ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"), - GasUsed: 65, - EffectiveGasPrice: 
big.NewInt(0), - BlockHash: blockHash, - BlockNumber: blockNumber, - TransactionIndex: 0, - DepositNonce: &depNonce1, - }, - &Receipt{ - Type: LegacyTxType, - EffectiveGasPrice: big.NewInt(0), - PostState: common.Hash{4}.Bytes(), - CumulativeGasUsed: 10, - Logs: []*Log{}, - // derived fields: - TxHash: txs[1].Hash(), - GasUsed: 18446744073709551561, - BlockHash: blockHash, - BlockNumber: blockNumber, - TransactionIndex: 1, - L1GasPrice: l1GasPrice, - L1BlobBaseFee: l1BlobGasPrice, - L1GasUsed: l1GasUsed, - L1Fee: l1Fee, - L1BaseFeeScalar: baseFeeScalar, - L1BlobBaseFeeScalar: blobBaseFeeScalar, - }, - } - for _, receipt := range receipts { - receipt.Bloom = CreateBloom(receipt) - } - - return txs, receipts -} - -func getOptimismIsthmusTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobGasPrice, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant *uint64) ([]*Transaction, []*Receipt) { - // Create a few transactions to have receipts for - txs := Transactions{ - NewTx(&DepositTx{ - To: nil, // contract creation - Value: big.NewInt(6), - Gas: 50, - Data: l1AttributesPayload, - }), - emptyTx, - } - - // Create the corresponding receipts - receipts := Receipts{ - &Receipt{ - Type: DepositTxType, - PostState: common.Hash{5}.Bytes(), - CumulativeGasUsed: 50 + 15, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x33}), - // derived fields: - BlockNumber: blockNumber.Uint64(), - TxHash: txs[0].Hash(), - TxIndex: 0, - BlockHash: blockHash, - Index: 0, - }, - { - Address: common.BytesToAddress([]byte{0x03, 0x33}), - // derived fields: - BlockNumber: blockNumber.Uint64(), - TxHash: txs[0].Hash(), - TxIndex: 0, - BlockHash: blockHash, - Index: 1, - }, - }, - TxHash: txs[0].Hash(), - ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"), - GasUsed: 65, - EffectiveGasPrice: big.NewInt(0), - BlockHash: blockHash, - BlockNumber: blockNumber, - TransactionIndex: 0, - DepositNonce: 
&depNonce1, - }, - &Receipt{ - Type: LegacyTxType, - EffectiveGasPrice: big.NewInt(0), - PostState: common.Hash{4}.Bytes(), - CumulativeGasUsed: 10, - Logs: []*Log{}, - // derived fields: - TxHash: txs[1].Hash(), - GasUsed: 18446744073709551561, - BlockHash: blockHash, - BlockNumber: blockNumber, - TransactionIndex: 1, - L1GasPrice: l1GasPrice, - L1BlobBaseFee: l1BlobGasPrice, - L1GasUsed: l1GasUsed, - L1Fee: l1Fee, - L1BaseFeeScalar: baseFeeScalar, - L1BlobBaseFeeScalar: blobBaseFeeScalar, - OperatorFeeScalar: operatorFeeScalar, - OperatorFeeConstant: operatorFeeConstant, - }, - } - for _, receipt := range receipts { - receipt.Bloom = CreateBloom(receipt) - } - - return txs, receipts -} - -func getOptimismTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1GasUsed, l1Fee *big.Int, feeScalar *big.Float) ([]*Transaction, []*Receipt) { - // Create a few transactions to have receipts for - txs := Transactions{ - NewTx(&DepositTx{ - To: nil, // contract creation - Value: big.NewInt(6), - Gas: 50, - Data: l1AttributesPayload, - }), - emptyTx, - } - - // Create the corresponding receipts - receipts := Receipts{ - &Receipt{ - Type: DepositTxType, - PostState: common.Hash{5}.Bytes(), - CumulativeGasUsed: 50 + 15, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x33}), - // derived fields: - BlockNumber: blockNumber.Uint64(), - TxHash: txs[0].Hash(), - TxIndex: 0, - BlockHash: blockHash, - Index: 0, - }, - { - Address: common.BytesToAddress([]byte{0x03, 0x33}), - // derived fields: - BlockNumber: blockNumber.Uint64(), - TxHash: txs[0].Hash(), - TxIndex: 0, - BlockHash: blockHash, - Index: 1, - }, - }, - TxHash: txs[0].Hash(), - ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"), - GasUsed: 65, - EffectiveGasPrice: big.NewInt(0), - BlockHash: blockHash, - BlockNumber: blockNumber, - TransactionIndex: 0, - DepositNonce: &depNonce1, - }, - &Receipt{ - Type: LegacyTxType, - EffectiveGasPrice: big.NewInt(0), - PostState: 
common.Hash{4}.Bytes(), - CumulativeGasUsed: 10, - Logs: []*Log{}, - // derived fields: - TxHash: txs[1].Hash(), - GasUsed: 18446744073709551561, - BlockHash: blockHash, - BlockNumber: blockNumber, - TransactionIndex: 1, - L1GasPrice: l1GasPrice, - L1GasUsed: l1GasUsed, - L1Fee: l1Fee, - FeeScalar: feeScalar, - }, - } - for _, receipt := range receipts { - receipt.Bloom = CreateBloom(receipt) - } - - return txs, receipts -} - -func TestDeriveOptimismBedrockTxReceipts(t *testing.T) { - // Bedrock style l1 attributes with L1Scalar=7_000_000 (becomes 7 after division), L1Overhead=50, L1BaseFee=1000*1e6 - payload := common.Hex2Bytes("015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0") - // the parameters we use below are defined in rollup_test.go - l1GasPrice := baseFee - l1GasUsed := bedrockGas - feeScalar := big.NewFloat(float64(scalar.Uint64() / 1e6)) - l1Fee := bedrockFee - txs, receipts := getOptimismTxReceipts(payload, l1GasPrice, l1GasUsed, l1Fee, feeScalar) - - // Re-derive receipts. 
- baseFee := big.NewInt(1000) - derivedReceipts := clearComputedFieldsOnReceipts(receipts) - err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err != nil { - t.Fatalf("DeriveFields(...) = %v, want ", err) - } - checkBedrockReceipts(t, receipts, derivedReceipts) - - // Should get same result with the Ecotone config because it will assume this is "first ecotone block" - // if it sees the bedrock style L1 attributes. - err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err != nil { - t.Fatalf("DeriveFields(...) = %v, want ", err) - } - checkBedrockReceipts(t, receipts, derivedReceipts) -} - -func TestDeriveOptimismEcotoneTxReceipts(t *testing.T) { - // Ecotone style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6 - payload := common.Hex2Bytes("440a5e20000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2") - // the parameters we use below are defined in rollup_test.go - baseFeeScalarUint64 := baseFeeScalar.Uint64() - blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() - txs, receipts := getOptimismEcotoneTxReceipts(payload, baseFee, blobBaseFee, ecotoneGas, ecotoneFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64) - - // Re-derive receipts. 
- baseFee := big.NewInt(1000) - derivedReceipts := clearComputedFieldsOnReceipts(receipts) - // Should error out if we try to process this with a pre-Ecotone config - err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err == nil { - t.Fatalf("expected error from deriving ecotone receipts with pre-ecotone config, got none") - } - - err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err != nil { - t.Fatalf("DeriveFields(...) = %v, want ", err) - } - diffReceipts(t, receipts, derivedReceipts) -} - -func TestDeriveOptimismIsthmusTxReceipts(t *testing.T) { - // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=7, operatorFeeConstant=9 - payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d255c6fb7c116fb15b44847d04") - // the parameters we use below are defined in rollup_test.go - baseFeeScalarUint64 := baseFeeScalar.Uint64() - blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() - operatorFeeScalarUint64 := operatorFeeScalar.Uint64() - operatorFeeConstantUint64 := operatorFeeConstant.Uint64() - txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, &operatorFeeScalarUint64, &operatorFeeConstantUint64) - - // Re-derive receipts. 
- baseFee := big.NewInt(1000) - derivedReceipts := clearComputedFieldsOnReceipts(receipts) - // Should error out if we try to process this with a pre-Isthmus config - err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err == nil { - t.Fatalf("expected error from deriving isthmus receipts with pre-isthmus config, got none") - } - - err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err != nil { - t.Fatalf("DeriveFields(...) = %v, want ", err) - } - diffReceipts(t, receipts, derivedReceipts) -} - -func TestDeriveOptimismIsthmusTxReceiptsNoOperatorFee(t *testing.T) { - // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=7, operatorFeeConstant=9 - payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000") - // the parameters we use below are defined in rollup_test.go - baseFeeScalarUint64 := baseFeeScalar.Uint64() - blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64() - txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, nil, nil) - - // Re-derive receipts. 
- baseFee := big.NewInt(1000) - derivedReceipts := clearComputedFieldsOnReceipts(receipts) - // Should error out if we try to process this with a pre-Isthmus config - err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err == nil { - t.Fatalf("expected error from deriving isthmus receipts with pre-isthmus config, got none") - } - - err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs) - if err != nil { - t.Fatalf("DeriveFields(...) = %v, want ", err) - } - diffReceipts(t, receipts, derivedReceipts) -} - -func diffReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) { - // Check diff of receipts against derivedReceipts. - r1, err := json.MarshalIndent(receipts, "", " ") - if err != nil { - t.Fatal("error marshaling input receipts:", err) - } - r2, err := json.MarshalIndent(derivedReceipts, "", " ") - if err != nil { - t.Fatal("error marshaling derived receipts:", err) - } - d := diff.Diff(string(r1), string(r2)) - if d != "" { - t.Fatal("receipts differ:", d) - } -} - -func checkBedrockReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) { - diffReceipts(t, receipts, derivedReceipts) - - // Check that we preserved the invariant: l1Fee = l1GasPrice * l1GasUsed * l1FeeScalar - // but with more difficult int math... 
- l2Rcpt := derivedReceipts[1] - l1GasCost := new(big.Int).Mul(l2Rcpt.L1GasPrice, l2Rcpt.L1GasUsed) - l1Fee := new(big.Float).Mul(new(big.Float).SetInt(l1GasCost), l2Rcpt.FeeScalar) - require.Equal(t, new(big.Float).SetInt(l2Rcpt.L1Fee), l1Fee) -} - -func TestBedrockDepositReceiptUnchanged(t *testing.T) { - expectedRlp := common.FromHex("7EF90156A003000000000000000000000000000000000000000000000000000000000000000AB9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000F0D7940000000000000000000000000000000000000033C001D7940000000000000000000000000000000000000333C002") - // Deposit receipt with no nonce - receipt := &Receipt{ - Type: DepositTxType, - PostState: common.Hash{3}.Bytes(), - CumulativeGasUsed: 10, - Logs: []*Log{ - {Address: common.BytesToAddress([]byte{0x33}), Data: []byte{1}, Topics: []common.Hash{}}, - {Address: common.BytesToAddress([]byte{0x03, 0x33}), Data: []byte{2}, Topics: []common.Hash{}}, - }, - TxHash: common.Hash{}, - ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), - GasUsed: 4, - } - - rlp, err := receipt.MarshalBinary() - require.NoError(t, err) - require.Equal(t, expectedRlp, rlp) - - // Consensus values should be unchanged after reparsing - parsed := new(Receipt) - err = parsed.UnmarshalBinary(rlp) - require.NoError(t, err) - require.Equal(t, receipt.Status, parsed.Status) - require.Equal(t, receipt.CumulativeGasUsed, parsed.CumulativeGasUsed) - require.Equal(t, receipt.Bloom, parsed.Bloom) - require.EqualValues(t, receipt.Logs, parsed.Logs) - // And still shouldn't 
have a nonce - require.Nil(t, parsed.DepositNonce) - // ..or a deposit nonce - require.Nil(t, parsed.DepositReceiptVersion) -} - -// Regolith introduced an inconsistency in behavior between EncodeIndex and MarshalBinary for a -// deposit transaction receipt. TestReceiptEncodeIndexBugIsEnshrined makes sure this difference is -// preserved for backwards compatibility purposes, but also that there is no discrepancy for the -// post-Canyon encoding. -func TestReceiptEncodeIndexBugIsEnshrined(t *testing.T) { - // Check that a post-Regolith, pre-Canyon receipt produces the expected difference between - // EncodeIndex and MarshalBinary. - buf := new(bytes.Buffer) - receipts := Receipts{depositReceiptWithNonce} - receipts.EncodeIndex(0, buf) - indexBytes := buf.Bytes() - - regularBytes, _ := receipts[0].MarshalBinary() - - require.NotEqual(t, indexBytes, regularBytes) - - // Confirm the buggy encoding is as expected, which means it should encode as if it had no - // nonce specified (like that of a non-deposit receipt, whose encoding would differ only in the - // type byte). - buf.Reset() - tempReceipt := *depositReceiptWithNonce - tempReceipt.Type = eip1559Receipt.Type - buggyBytes, _ := tempReceipt.MarshalBinary() - - require.Equal(t, indexBytes[1:], buggyBytes[1:]) - - // check that the post-Canyon encoding has no differences between EncodeIndex and - // MarshalBinary. 
- buf.Reset() - receipts = Receipts{depositReceiptWithNonceAndVersion} - receipts.EncodeIndex(0, buf) - indexBytes = buf.Bytes() - - regularBytes, _ = receipts[0].MarshalBinary() - - require.Equal(t, indexBytes, regularBytes) - - // Check that bumping the nonce post-canyon changes the hash - bumpedReceipt := *depositReceiptWithNonceAndVersion - bumpedNonce := nonce + 1 - bumpedReceipt.DepositNonce = &bumpedNonce - bumpedBytes, _ := bumpedReceipt.MarshalBinary() - require.NotEqual(t, regularBytes, bumpedBytes) -} - -func TestRoundTripReceipt(t *testing.T) { - tests := []struct { - name string - rcpt *Receipt - }{ - {name: "Legacy", rcpt: legacyReceipt}, - {name: "AccessList", rcpt: accessListReceipt}, - {name: "EIP1559", rcpt: eip1559Receipt}, - {name: "DepositNoNonce", rcpt: depositReceiptNoNonce}, - {name: "DepositWithNonce", rcpt: depositReceiptWithNonce}, - {name: "DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion}, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - data, err := test.rcpt.MarshalBinary() - require.NoError(t, err) - - d := &Receipt{} - err = d.UnmarshalBinary(data) - require.NoError(t, err) - require.Equal(t, test.rcpt, d) - require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce) - require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion) - }) - - t.Run(fmt.Sprintf("%sRejectExtraData", test.name), func(t *testing.T) { - data, err := test.rcpt.MarshalBinary() - require.NoError(t, err) - data = append(data, 1, 2, 3, 4) - d := &Receipt{} - err = d.UnmarshalBinary(data) - require.Error(t, err) - }) - } -} - -func TestRoundTripReceiptForStorage(t *testing.T) { - tests := []struct { - name string - rcpt *Receipt - }{ - {name: "Legacy", rcpt: legacyReceipt}, - {name: "AccessList", rcpt: accessListReceipt}, - {name: "EIP1559", rcpt: eip1559Receipt}, - {name: "DepositNoNonce", rcpt: depositReceiptNoNonce}, - {name: "DepositWithNonce", rcpt: depositReceiptWithNonce}, - {name: 
"DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion}, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - data, err := rlp.EncodeToBytes((*ReceiptForStorage)(test.rcpt)) - require.NoError(t, err) - - d := &ReceiptForStorage{} - err = rlp.DecodeBytes(data, d) - require.NoError(t, err) - // Only check the stored fields - the others are derived later - require.Equal(t, test.rcpt.Status, d.Status) - require.Equal(t, test.rcpt.CumulativeGasUsed, d.CumulativeGasUsed) - require.Equal(t, test.rcpt.Logs, d.Logs) - require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce) - require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion) - }) - } -} diff --git a/core/types/rollup_cost.go b/core/types/rollup_cost.go index 43a028ddd3..053951448a 100644 --- a/core/types/rollup_cost.go +++ b/core/types/rollup_cost.go @@ -19,6 +19,7 @@ package types import ( "bytes" "encoding/binary" + "errors" "fmt" "math/big" @@ -41,6 +42,9 @@ const ( // array. baseFeeScalar is in the first four bytes of the segment, blobBaseFeeScalar the next // four. scalarSectionStart = 32 - BaseFeeScalarSlotOffset - 4 + + IsthmusL1AttributesLen = 176 + JovianL1AttributesLen = 178 ) func init() { @@ -57,6 +61,8 @@ var ( EcotoneL1AttributesSelector = []byte{0x44, 0x0a, 0x5e, 0x20} // IsthmusL1AttributesSelector is the selector indicating Isthmus style L1 gas attributes. IsthmusL1AttributesSelector = []byte{0x09, 0x89, 0x99, 0xbe} + // JovianL1AttributesSelector is the selector indicating Jovian style L1 gas attributes. + JovianL1AttributesSelector = []byte{0x3d, 0xb6, 0xbe, 0x2b} // L1BlockAddr is the address of the L1Block contract which stores the L1 gas attributes. 
L1BlockAddr = common.HexToAddress("0x4200000000000000000000000000000000000015") @@ -79,6 +85,7 @@ var ( // attributes OperatorFeeParamsSlot = common.BigToHash(big.NewInt(8)) + oneHundred = big.NewInt(100) oneMillion = big.NewInt(1_000_000) ecotoneDivisor = big.NewInt(1_000_000 * 16) fjordDivisor = big.NewInt(1_000_000_000_000) @@ -234,7 +241,11 @@ func NewOperatorCostFunc(config *params.ChainConfig, statedb StateGetter) Operat } operatorFeeScalar, operatorFeeConstant := ExtractOperatorFeeParams(operatorFeeParams) - return newOperatorCostFunc(operatorFeeScalar, operatorFeeConstant) + // Return the Operator Fee fix version if the feature is active + if config.IsOperatorFeeFix(blockTime) { + return newOperatorCostFuncOperatorFeeFix(operatorFeeScalar, operatorFeeConstant) + } + return newOperatorCostFuncIsthmus(operatorFeeScalar, operatorFeeConstant) } return func(gas uint64, blockTime uint64) *uint256.Int { @@ -247,7 +258,8 @@ func NewOperatorCostFunc(config *params.ChainConfig, statedb StateGetter) Operat } } -func newOperatorCostFunc(operatorFeeScalar *big.Int, operatorFeeConstant *big.Int) operatorCostFunc { +// newOperatorCostFuncIsthmus returns the operator cost function introduced with Isthmus. +func newOperatorCostFuncIsthmus(operatorFeeScalar *big.Int, operatorFeeConstant *big.Int) operatorCostFunc { return func(gas uint64) *uint256.Int { fee := new(big.Int).SetUint64(gas) fee = fee.Mul(fee, operatorFeeScalar) @@ -256,7 +268,25 @@ func newOperatorCostFunc(operatorFeeScalar *big.Int, operatorFeeConstant *big.In feeU256, overflow := uint256.FromBig(fee) if overflow { - // This should never happen, as (u64.max * u32.max / 1e6) + u64.max is an int of bit length 77 + // This should never happen, as ((u64.max * u32.max) / 1e6) + u64.max fits in 77 bits + panic("overflow in operator cost calculation") + } + + return feeU256 + } +} + +// newOperatorCostFuncOperatorFeeFix returns the operator cost function for the operator fee fix feature. 
+func newOperatorCostFuncOperatorFeeFix(operatorFeeScalar *big.Int, operatorFeeConstant *big.Int) operatorCostFunc { + return func(gas uint64) *uint256.Int { + fee := new(big.Int).SetUint64(gas) + fee = fee.Mul(fee, operatorFeeScalar) + fee = fee.Mul(fee, oneHundred) + fee = fee.Add(fee, operatorFeeConstant) + + feeU256, overflow := uint256.FromBig(fee) + if overflow { + // This should never happen, as (u64.max * u32.max * 100) + u64.max fits in 103 bits panic("overflow in operator cost calculation") } @@ -526,6 +556,54 @@ func extractL1GasParamsPostIsthmus(data []byte) (gasParams, error) { }, nil } +// ExtractDAFootprintGasScalar extracts the DA footprint gas scalar from the L1 attributes transaction data +// of a Jovian-enabled block. +func ExtractDAFootprintGasScalar(data []byte) (uint16, error) { + if len(data) < JovianL1AttributesLen { + return 0, fmt.Errorf("L1 attributes transaction data too short for DA footprint gas scalar: %d", len(data)) + } + // Future forks need to be added here + if !bytes.Equal(data[0:4], JovianL1AttributesSelector) { + return 0, fmt.Errorf("L1 attributes transaction data does not have Jovian selector") + } + daFootprintGasScalar := binary.BigEndian.Uint16(data[JovianL1AttributesLen-2 : JovianL1AttributesLen]) + return daFootprintGasScalar, nil +} + +// CalcDAFootprint calculates the total DA footprint of a block for an OP Stack chain. +// Jovian introduces a DA footprint block limit which is stored in the BlobGasUsed header field and that is taken +// into account during base fee updates. +// CalcDAFootprint must not be called for pre-Jovian blocks. +func CalcDAFootprint(txs []*Transaction) (uint64, error) { + if len(txs) == 0 || !txs[0].IsDepositTx() { + return 0, errors.New("missing deposit transaction") + } + + // First Jovian block doesn't set the DA footprint gas scalar yet and + // it must not have user transactions. 
+ data := txs[0].Data() + if len(data) == IsthmusL1AttributesLen { + if !txs[len(txs)-1].IsDepositTx() { + // sufficient to check last transaction because deposits precede non-deposit txs + return 0, errors.New("unexpected non-deposit transactions in Jovian activation block") + } + return 0, nil + } // ExtractDAFootprintGasScalar catches all invalid lengths + + daFootprintGasScalar, err := ExtractDAFootprintGasScalar(data) + if err != nil { + return 0, err + } + var daFootprint uint64 + for _, tx := range txs { + if tx.IsDepositTx() { + continue + } + daFootprint += tx.RollupCostData().EstimatedDASize().Uint64() * uint64(daFootprintGasScalar) + } + return daFootprint, nil +} + // L1Cost computes the the data availability fee for transactions in blocks prior to the Ecotone // upgrade. It is used by e2e tests so must remain exported. func L1Cost(rollupDataGas uint64, l1BaseFee, overhead, scalar *big.Int) *big.Int { @@ -587,13 +665,13 @@ func ExtractEcotoneFeeParams(l1FeeParams []byte) (l1BaseFeeScalar, l1BlobBaseFee offset := scalarSectionStart l1BaseFeeScalar = new(big.Int).SetBytes(l1FeeParams[offset : offset+4]) l1BlobBaseFeeScalar = new(big.Int).SetBytes(l1FeeParams[offset+4 : offset+8]) - return + return l1BaseFeeScalar, l1BlobBaseFeeScalar } func ExtractOperatorFeeParams(operatorFeeParams common.Hash) (operatorFeeScalar, operatorFeeConstant *big.Int) { operatorFeeScalar = new(big.Int).SetBytes(operatorFeeParams[20:24]) operatorFeeConstant = new(big.Int).SetBytes(operatorFeeParams[24:32]) - return + return operatorFeeScalar, operatorFeeConstant } func bedrockCalldataGasUsed(costData RollupCostData) (calldataGasUsed *big.Int) { diff --git a/core/types/rollup_cost_test.go b/core/types/rollup_cost_test.go index 0075c2540b..f2f0035720 100644 --- a/core/types/rollup_cost_test.go +++ b/core/types/rollup_cost_test.go @@ -35,6 +35,7 @@ var ( // the emptyTx is out of bounds for the linear regression so it uses the minimum size fjordFee = big.NewInt(3203000) // 
100_000_000 * (2 * 1000 * 1e6 * 16 + 3 * 10 * 1e6) / 1e12 ithmusOperatorFee = uint256.NewInt(1256417826611659930) // 1618 * 1439103868 / 1e6 + 1256417826609331460 + jovianOperatorFee = uint256.NewInt(1256650673615173860) // 1618 * 1439103868 * 100 + 1256417826609331460 bedrockGas = big.NewInt(1618) regolithGas = big.NewInt(530) // 530 = 1618 - (16*68) @@ -493,6 +494,13 @@ func TestNewOperatorCostFunc(t *testing.T) { fee = costFunc(bedrockGas.Uint64(), time) require.NotNil(t, fee) require.Equal(t, ithmusOperatorFee, fee) + + // emptyTx fee w/ jovian config should be not 0 + config.JovianTime = &time + costFunc = NewOperatorCostFunc(config, statedb) + fee = costFunc(bedrockGas.Uint64(), time) + require.NotNil(t, fee) + require.Equal(t, jovianOperatorFee, fee) } func TestFlzCompressLen(t *testing.T) { @@ -548,14 +556,16 @@ var emptyTxWithGas = NewTransaction( // combines the L1 cost and operator cost. func TestTotalRollupCostFunc(t *testing.T) { zero := uint64(0) - later := uint64(10) + isthmusTime := uint64(10) + jovianTime := uint64(20) config := ¶ms.ChainConfig{ Optimism: params.OptimismTestConfig.Optimism, RegolithTime: &zero, EcotoneTime: &zero, FjordTime: &zero, HoloceneTime: &zero, - IsthmusTime: &later, + IsthmusTime: &isthmusTime, + JovianTime: &jovianTime, } statedb := &testStateGetter{ baseFee: baseFee, @@ -569,13 +579,24 @@ func TestTotalRollupCostFunc(t *testing.T) { } costFunc := NewTotalRollupCostFunc(config, statedb) - cost := costFunc(emptyTxWithGas, later-1) + + // Pre-Isthmus: only L1 cost + cost := costFunc(emptyTxWithGas, isthmusTime-1) require.NotNil(t, cost) expCost := uint256.MustFromBig(fjordFee) require.Equal(t, expCost, cost, "pre-Isthmus total rollup cost should only contain L1 cost") - cost = costFunc(emptyTxWithGas, later+1) + // Isthmus: L1 cost + Isthmus operator cost + cost = costFunc(emptyTxWithGas, isthmusTime+1) require.NotNil(t, cost) + expCost = uint256.MustFromBig(fjordFee) expCost.Add(expCost, ithmusOperatorFee) - 
require.Equal(t, expCost, cost, "Isthmus total rollup cost should contain L1 cost and operator cost") + require.Equal(t, expCost, cost, "Isthmus total rollup cost should contain L1 cost and Isthmus operator cost") + + // Jovian: L1 cost + fixed operator cost + cost = costFunc(emptyTxWithGas, jovianTime+1) + require.NotNil(t, cost) + expCost = uint256.MustFromBig(fjordFee) + expCost.Add(expCost, jovianOperatorFee) + require.Equal(t, expCost, cost, "Jovian total rollup cost should contain L1 cost and Jovian operator cost") } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 7d20d5bcbb..18dfb9a9d2 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -227,7 +227,29 @@ var PrecompiledContractsIsthmus = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{0x01, 0x00}): &p256VerifyFjord{}, } +var PrecompiledContractsJovian = map[common.Address]PrecompiledContract{ + common.BytesToAddress([]byte{1}): &ecrecover{}, + common.BytesToAddress([]byte{2}): &sha256hash{}, + common.BytesToAddress([]byte{3}): &ripemd160hash{}, + common.BytesToAddress([]byte{4}): &dataCopy{}, + common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true}, + common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, + common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, + common.BytesToAddress([]byte{8}): &bn256PairingJovian{}, + common.BytesToAddress([]byte{9}): &blake2F{}, + common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{}, + common.BytesToAddress([]byte{0x0b}): &bls12381G1Add{}, + common.BytesToAddress([]byte{0x0c}): &bls12381G1MultiExpJovian{}, + common.BytesToAddress([]byte{0x0d}): &bls12381G2Add{}, + common.BytesToAddress([]byte{0x0e}): &bls12381G2MultiExpJovian{}, + common.BytesToAddress([]byte{0x0f}): &bls12381PairingJovian{}, + common.BytesToAddress([]byte{0x10}): &bls12381MapG1{}, + common.BytesToAddress([]byte{0x11}): &bls12381MapG2{}, + common.BytesToAddress([]byte{0x01, 0x00}): &p256VerifyFjord{}, +} + var ( + PrecompiledAddressesJovian 
[]common.Address PrecompiledAddressesIsthmus []common.Address PrecompiledAddressesGranite []common.Address PrecompiledAddressesFjord []common.Address @@ -271,11 +293,16 @@ func init() { for k := range PrecompiledContractsIsthmus { PrecompiledAddressesIsthmus = append(PrecompiledAddressesIsthmus, k) } + for k := range PrecompiledContractsJovian { + PrecompiledAddressesJovian = append(PrecompiledAddressesJovian, k) + } } func activePrecompiledContracts(rules params.Rules) PrecompiledContracts { // note: the order of these switch cases is important switch { + case rules.IsOptimismJovian: + return PrecompiledContractsJovian case rules.IsOptimismIsthmus: return PrecompiledContractsIsthmus case rules.IsOptimismGranite: @@ -309,6 +336,8 @@ func ActivePrecompiledContracts(rules params.Rules) PrecompiledContracts { // ActivePrecompiles returns the precompile addresses enabled with the current configuration. func ActivePrecompiles(rules params.Rules) []common.Address { switch { + case rules.IsOptimismJovian: + return PrecompiledAddressesJovian case rules.IsOptimismIsthmus: return PrecompiledAddressesIsthmus case rules.IsOptimismGranite: @@ -823,6 +852,23 @@ func (c *bn256PairingGranite) Name() string { return "BN254_PAIRING" } +type bn256PairingJovian struct{} + +func (c *bn256PairingJovian) RequiredGas(input []byte) uint64 { + return new(bn256PairingIstanbul).RequiredGas(input) +} + +func (c *bn256PairingJovian) Run(input []byte) ([]byte, error) { + if len(input) > int(params.Bn256PairingMaxInputSizeJovian) { + return nil, errBadPairingInputSize + } + return runBn256Pairing(input) +} + +func (c *bn256PairingJovian) Name() string { + return "BN254_PAIRING" +} + // bn256PairingIstanbul implements a pairing pre-compile for the bn256 curve // conforming to Istanbul consensus rules. 
type bn256PairingIstanbul struct{} @@ -986,6 +1032,25 @@ func (c *bls12381G1MultiExpIsthmus) Name() string { return "BLS12_G1MSM" } +type bls12381G1MultiExpJovian struct { +} + +func (c *bls12381G1MultiExpJovian) RequiredGas(input []byte) uint64 { + return new(bls12381G1MultiExp).RequiredGas(input) +} + +func (c *bls12381G1MultiExpJovian) Run(input []byte) ([]byte, error) { + if len(input) > int(params.Bls12381G1MulMaxInputSizeJovian) { + return nil, errBLS12381MaxG1Size + } + + return new(bls12381G1MultiExp).Run(input) +} + +func (c *bls12381G1MultiExpJovian) Name() string { + return "BLS12_G1MSM" +} + // bls12381G1MultiExp implements EIP-2537 G1MultiExp precompile for Prague (no size limits). func (c *bls12381G1Add) Name() string { return "BLS12_G1ADD" @@ -1115,6 +1180,25 @@ func (c *bls12381G2MultiExpIsthmus) Name() string { return "BLS12_G2MSM" } +type bls12381G2MultiExpJovian struct { +} + +func (c *bls12381G2MultiExpJovian) RequiredGas(input []byte) uint64 { + return new(bls12381G2MultiExp).RequiredGas(input) +} + +func (c *bls12381G2MultiExpJovian) Run(input []byte) ([]byte, error) { + if len(input) > int(params.Bls12381G2MulMaxInputSizeJovian) { + return nil, errBLS12381MaxG2Size + } + + return new(bls12381G2MultiExp).Run(input) +} + +func (c *bls12381G2MultiExpJovian) Name() string { + return "BLS12_G2MSM" +} + // bls12381G2MultiExp implements EIP-2537 G2MultiExp precompile. 
type bls12381G2MultiExp struct{} @@ -1198,6 +1282,25 @@ func (c *bls12381PairingIsthmus) Name() string { return "BLS12_PAIRING_CHECK" } +type bls12381PairingJovian struct { +} + +func (c *bls12381PairingJovian) RequiredGas(input []byte) uint64 { + return new(bls12381Pairing).RequiredGas(input) +} + +func (c *bls12381PairingJovian) Run(input []byte) ([]byte, error) { + if len(input) > int(params.Bls12381PairingMaxInputSizeJovian) { + return nil, errBLS12381MaxPairingSize + } + + return new(bls12381Pairing).Run(input) +} + +func (c *bls12381PairingJovian) Name() string { + return "BLS12_PAIRING_CHECK" +} + // bls12381Pairing implements EIP-2537 Pairing precompile. type bls12381Pairing struct{} diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index 74f2f55bc6..2596c52298 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -74,6 +74,11 @@ var allPrecompiles = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{0x0b}): &p256Verify{}, common.BytesToAddress([]byte{0x01, 0x00}): &p256VerifyFjord{}, + + common.BytesToAddress([]byte{0x2f, 0x08}): &bn256PairingJovian{}, + common.BytesToAddress([]byte{0x2f, 0x0e}): &bls12381PairingJovian{}, + common.BytesToAddress([]byte{0x2f, 0x0b}): &bls12381G1MultiExpJovian{}, + common.BytesToAddress([]byte{0x2f, 0x0d}): &bls12381G2MultiExpJovian{}, } // EIP-152 test vectors diff --git a/crypto/kzg4844/kzg4844.go b/crypto/kzg4844/kzg4844.go index 9da2386368..3ccc204838 100644 --- a/crypto/kzg4844/kzg4844.go +++ b/crypto/kzg4844/kzg4844.go @@ -34,10 +34,10 @@ var ( blobT = reflect.TypeFor[Blob]() commitmentT = reflect.TypeFor[Commitment]() proofT = reflect.TypeFor[Proof]() - - CellProofsPerBlob = 128 ) +const CellProofsPerBlob = 128 + // Blob represents a 4844 data blob. 
type Blob [131072]byte diff --git a/fork.yaml b/fork.yaml index 7957643d17..f4c1d93b5f 100644 --- a/fork.yaml +++ b/fork.yaml @@ -199,7 +199,10 @@ def: - title: Warn on missing hardfork data and emit additional metrics globs: - "core/blockchain.go" - - title: Additional metrics + - title: Define additional header-based metrics + globs: + - "core/blockchain_optimism.go" + - title: Add hooks for additional header-chain metrics globs: - "core/headerchain.go" - title: Optional Engine API extensions @@ -250,12 +253,15 @@ def: - title: "User API enhancements" description: "Encode the Deposit Tx properties, the L1 costs, and daisy-chain RPC-calls for pre-Bedrock historical data" sub: - - title: "Receipts metadata" + - title: "Receipts metadata & deposit receipts" description: | Pre-Bedrock L1-cost receipt data is loaded from the database if available, and post-Bedrock the L1-cost metadata is hydrated on-the-fly based on the L1 fee information in the corresponding block. + We also populate receipts with L1 block attributes like Operator Fee and DA Footprint parameters. + Furthermore, OP Stack introduces Deposit receipts, a special kind of receipts for Deposit transactions. 
globs: - "core/types/receipt.go" + - "core/types/receipt_opstack.go" - "core/types/gen_receipt_json.go" - "core/rawdb/accessors_chain.go" - title: "API Backend" diff --git a/go.mod b/go.mod index a6dbd8dc30..3197ca1877 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 - github.com/ethereum/c-kzg-4844/v2 v2.1.0 + github.com/ethereum/c-kzg-4844/v2 v2.1.5 github.com/ethereum/go-verkle v0.2.2 github.com/fatih/color v1.16.0 github.com/ferranbt/fastssz v0.1.4 @@ -38,7 +38,7 @@ require ( github.com/gorilla/websocket v1.4.2 github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-bexpr v0.1.10 - github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 + github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.3.2 github.com/huin/goupnp v1.3.0 @@ -62,7 +62,7 @@ require ( github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 github.com/stretchr/testify v1.10.0 - github.com/supranational/blst v0.3.14 + github.com/supranational/blst v0.3.15 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/urfave/cli/v2 v2.27.5 go.uber.org/automaxprocs v1.5.2 diff --git a/go.sum b/go.sum index 7da7a0894b..e036aa0532 100644 --- a/go.sum +++ b/go.sum @@ -115,8 +115,8 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= 
-github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= +github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= @@ -191,8 +191,8 @@ github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY4 github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= -github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= -github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330= +github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= @@ -355,8 +355,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= -github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o= +github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index d928541b79..8297056c8c 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1720,7 +1720,7 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), } - if chainConfig.Optimism != nil && !tx.IsDepositTx() { + if chainConfig.IsOptimism() && !tx.IsDepositTx() { fields["l1GasPrice"] = (*hexutil.Big)(receipt.L1GasPrice) fields["l1GasUsed"] = (*hexutil.Big)(receipt.L1GasUsed) fields["l1Fee"] = (*hexutil.Big)(receipt.L1Fee) @@ -1745,8 +1745,14 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u if receipt.OperatorFeeConstant != nil { fields["operatorFeeConstant"] = hexutil.Uint64(*receipt.OperatorFeeConstant) } + // Fields added in Jovian + if receipt.DAFootprintGasScalar != nil { + fields["daFootprintGasScalar"] = hexutil.Uint64(*receipt.DAFootprintGasScalar) + // Jovian repurposes blobGasUsed for DA footprint gas used + fields["blobGasUsed"] = hexutil.Uint64(receipt.BlobGasUsed) + } } - if chainConfig.Optimism != nil && tx.IsDepositTx() && receipt.DepositNonce != nil { + if chainConfig.IsOptimism() && tx.IsDepositTx() && receipt.DepositNonce != nil { 
fields["depositNonce"] = hexutil.Uint64(*receipt.DepositNonce)
 		if receipt.DepositReceiptVersion != nil {
 			fields["depositReceiptVersion"] = hexutil.Uint64(*receipt.DepositReceiptVersion)
 		}
diff --git a/metrics/gauge.go b/metrics/gauge.go
index 909fca1304..4f93e22487 100644
--- a/metrics/gauge.go
+++ b/metrics/gauge.go
@@ -45,6 +45,7 @@ func (g *Gauge) Update(v int64) {
 	(*atomic.Int64)(g).Store(v)
 }
 
+// OPStack addition
 // TryUpdate updates the gauge if the value is non-nil, converting it to int64.
 func (g *Gauge) TryUpdate(v *big.Int) {
 	if v == nil {
@@ -53,6 +54,15 @@ func (g *Gauge) TryUpdate(v *big.Int) {
 	(*atomic.Int64)(g).Store(v.Int64())
 }
 
+// OPStack addition
+// TryUpdateUint64 updates the gauge if the value is non-nil, converting it to int64.
+func (g *Gauge) TryUpdateUint64(v *uint64) {
+	if v == nil {
+		return
+	}
+	(*atomic.Int64)(g).Store(int64(*v))
+}
+
 // UpdateIfGt updates the gauge's value if v is larger then the current value.
 func (g *Gauge) UpdateIfGt(v int64) {
 	value := (*atomic.Int64)(g)
diff --git a/miner/miner_optimism_test.go b/miner/miner_optimism_test.go
new file mode 100644
index 0000000000..4be405f159
--- /dev/null
+++ b/miner/miner_optimism_test.go
@@ -0,0 +1,152 @@
+package miner
+
+import (
+	"encoding/binary"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/beacon"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/stretchr/testify/require"
+)
+
+const testDAFootprintGasScalar = 400
+
+// TestDAFootprintMining tests that the miner correctly limits the DA footprint of the block.
+// It builds a block via the miner from txpool
+// transactions and then imports the block into the chain, asserting that
+// execution succeeds.
+func TestDAFootprintMining(t *testing.T) { + requirePreJovianBehavior := func(t *testing.T, block *types.Block, receipts []*types.Receipt) { + var txGas uint64 + for _, receipt := range receipts { + txGas += receipt.GasUsed + } + require.Equal(t, txGas, block.GasUsed(), "total tx gas used should be equal to block gas used") + require.Zero(t, *block.Header().BlobGasUsed, "expected 0 blob gas used") + } + + requireLargeDAFootprintBehavior := func(t *testing.T, block *types.Block, receipts []*types.Receipt) { + var ( + txGas uint64 + daFootprint uint64 + txs = block.Transactions() + ) + + require.Equal(t, len(receipts), len(txs)) + + for i, receipt := range receipts { + txGas += receipt.GasUsed + if txs[i].IsDepositTx() { + continue + } + daFootprint += txs[i].RollupCostData().EstimatedDASize().Uint64() * testDAFootprintGasScalar + } + require.Equal(t, txGas, block.GasUsed(), "total tx gas used should be equal to block gas used") + require.Greater(t, daFootprint, block.GasUsed(), "total DA footprint used should be greater than block gas used") + require.LessOrEqual(t, daFootprint, block.GasLimit(), "total DA footprint used should be less or equal block gas limit") + } + t.Run("jovian-one-min-tx", func(t *testing.T) { + testMineAndExecute(t, 0, jovianConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) { + require.Len(t, receipts, 2) // 1 test pending tx and 1 deposit tx + requireLargeDAFootprintBehavior(t, block, receipts) + + // Double-confirm DA footprint calculation manually in this simple transaction case. 
+ daFootprint, err := types.CalcDAFootprint(block.Transactions()) + require.NoError(t, err, "failed to calculate DA footprint") + require.Equal(t, daFootprint, *block.Header().BlobGasUsed, + "header blob gas used should match calculated DA footprint") + require.Equal(t, testDAFootprintGasScalar*types.MinTransactionSize.Uint64(), daFootprint, + "simple pending transaction should lead to min DA footprint") + }) + }) + t.Run("jovian-at-limit", func(t *testing.T) { + testMineAndExecute(t, 17, jovianConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) { + require.Len(t, receipts, 19) // including 1 test pending tx and 1 deposit tx + requireLargeDAFootprintBehavior(t, block, receipts) + }) + }) + t.Run("jovian-above-limit", func(t *testing.T) { + testMineAndExecute(t, 18, jovianConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) { + require.Len(t, receipts, 19) // same as for 17, because 18th tx from pool shouldn't have been included + requireLargeDAFootprintBehavior(t, block, receipts) + }) + }) + t.Run("isthmus", func(t *testing.T) { + testMineAndExecute(t, 39, isthmusConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) { + require.Len(t, receipts, 41) // including 1 test pending tx and 1 deposit tx + requirePreJovianBehavior(t, block, receipts) + }) + }) + + t.Run("jovian-invalid-blobGasUsed", func(t *testing.T) { + testMineAndExecute(t, 0, jovianConfig(), func(t *testing.T, bc *core.BlockChain, block *types.Block, receipts []*types.Receipt) { + require.Len(t, receipts, 2) // 1 test pending tx and 1 deposit tx + header := block.Header() + *header.BlobGasUsed += 1 // invalidate blobGasUsed + invalidBlock := block.WithSeal(header) + _, err := bc.InsertChain(types.Blocks{invalidBlock}) + require.ErrorContains(t, err, "invalid DA footprint in blobGasUsed field (remote: 40001 local: 40000)") + }) + }) +} + +func testMineAndExecute(t *testing.T, 
numTxs uint64, cfg *params.ChainConfig, assertFn func(*testing.T, *core.BlockChain, *types.Block, []*types.Receipt)) { + db := rawdb.NewMemoryDatabase() + w, b := newTestWorker(t, cfg, beacon.New(ethash.NewFaker()), db, 0) + + // Start from nonce 1 to avoid colliding with the preloaded pending tx. + txs := genTxs(1, numTxs) + + // Add to txpool for the miner to pick up. + if errs := b.txPool.Add(txs, false); len(errs) > 0 { + for _, err := range errs { + require.NoError(t, err, "failed adding tx to pool") + } + } + + parent := b.chain.CurrentBlock() + ts := parent.Time + 12 + dtx := new(types.DepositTx) + if cfg.IsDAFootprintBlockLimit(parent.Time) { + dtx = jovianDepositTx(testDAFootprintGasScalar) + } + + genParams := &generateParams{ + parentHash: b.chain.CurrentBlock().Hash(), + timestamp: ts, + withdrawals: types.Withdrawals{}, + beaconRoot: new(common.Hash), + gasLimit: ptr(uint64(1e6)), // Small gas limit to easily fill block + txs: types.Transactions{types.NewTx(dtx)}, + eip1559Params: eip1559.EncodeHolocene1559Params(250, 6), + } + if cfg.IsMinBaseFee(ts) { + genParams.minBaseFee = new(uint64) + } + r := w.generateWork(genParams, false) + require.NoError(t, r.err, "block generation failed") + require.NotNil(t, r.block, "no block generated") + + assertFn(t, b.chain, r.block, r.receipts) + + // Import the block into the chain, which executes it via StateProcessor. 
+ _, err := b.chain.InsertChain(types.Blocks{r.block}) + require.NoError(t, err, "block import/execution failed") +} + +func jovianDepositTx(daFootprintGasScalar uint16) *types.DepositTx { + data := make([]byte, types.JovianL1AttributesLen) + copy(data[0:4], types.JovianL1AttributesSelector) + binary.BigEndian.PutUint16(data[types.JovianL1AttributesLen-2:types.JovianL1AttributesLen], daFootprintGasScalar) + return &types.DepositTx{Data: data} +} + +func ptr[T any](v T) *T { + return &v +} diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index 387ce3bbaa..4665a43a96 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -25,9 +25,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/misc/eip1559" @@ -39,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" - "github.com/stretchr/testify/require" ) var ( @@ -74,7 +76,10 @@ const ( numDAFilterTxs = 256 ) -var zero = uint64(0) +var ( + zero = uint64(0) + validEIP1559Params = eip1559.EncodeHolocene1559Params(250, 6) +) func init() { testTxPoolConfig = legacypool.DefaultConfig @@ -118,7 +123,7 @@ type testWorkerBackend struct { } func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend { - var gspec = &core.Genesis{ + gspec := &core.Genesis{ Config: chainConfig, Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, } @@ -127,7 +132,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine gspec.ExtraData = 
make([]byte, 32+common.AddressLength+crypto.SignatureLength) copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes()) e.Authorize(testBankAddress) - case *ethash.Ethash: + case *ethash.Ethash, *beacon.Beacon: default: t.Fatalf("unexpected consensus engine type: %T", engine) } @@ -166,23 +171,42 @@ func TestBuildPayload(t *testing.T) { // the builder routine t.Run("with-tx-pool", func(t *testing.T) { testBuildPayload(t, false, false, nil, params.TestChainConfig) }) t.Run("with-tx-pool-interrupt", func(t *testing.T) { testBuildPayload(t, false, true, nil, params.TestChainConfig) }) - params1559 := []byte{0, 1, 2, 3, 4, 5, 6, 7} - t.Run("with-params-holocene", func(t *testing.T) { testBuildPayload(t, false, false, params1559, holoceneConfig()) }) - t.Run("with-params-no-tx-pool-holocene", func(t *testing.T) { testBuildPayload(t, true, false, params1559, holoceneConfig()) }) - t.Run("with-params-interrupt-holocene", func(t *testing.T) { testBuildPayload(t, false, true, params1559, holoceneConfig()) }) - t.Run("with-params-jovian", func(t *testing.T) { testBuildPayload(t, false, false, params1559, jovianConfig()) }) - t.Run("with-params-no-tx-pool-jovian", func(t *testing.T) { testBuildPayload(t, true, false, params1559, jovianConfig()) }) - t.Run("with-params-interrupt-jovian", func(t *testing.T) { testBuildPayload(t, false, true, params1559, jovianConfig()) }) - - t.Run("wrong-config-no-params", func(t *testing.T) { testBuildPayloadWrongConfig(t, nil, holoceneConfig()) }) - t.Run("wrong-config-params-holocene", func(t *testing.T) { testBuildPayloadWrongConfig(t, params1559, holoceneConfig()) }) - t.Run("wrong-config-params-jovian", func(t *testing.T) { testBuildPayloadWrongConfig(t, params1559, jovianConfig()) }) + t.Run("with-params-holocene", func(t *testing.T) { testBuildPayload(t, false, false, validEIP1559Params, holoceneConfig()) }) + t.Run("with-params-no-tx-pool-holocene", func(t *testing.T) { testBuildPayload(t, true, false, 
validEIP1559Params, holoceneConfig()) }) + t.Run("with-params-interrupt-holocene", func(t *testing.T) { testBuildPayload(t, false, true, validEIP1559Params, holoceneConfig()) }) + t.Run("with-params-jovian", func(t *testing.T) { testBuildPayload(t, false, false, validEIP1559Params, jovianConfig()) }) + t.Run("with-params-no-tx-pool-jovian", func(t *testing.T) { testBuildPayload(t, true, false, validEIP1559Params, jovianConfig()) }) + t.Run("with-params-interrupt-jovian", func(t *testing.T) { testBuildPayload(t, false, true, validEIP1559Params, jovianConfig()) }) zeroParams := make([]byte, 8) t.Run("with-zero-params-holocene", func(t *testing.T) { testBuildPayload(t, true, false, zeroParams, holoceneConfig()) }) t.Run("with-zero-params-jovian", func(t *testing.T) { testBuildPayload(t, true, false, zeroParams, jovianConfig()) }) } +func TestBuildPayloadError(t *testing.T) { + t.Run("pre-holocene-with-params", func(t *testing.T) { + cfg := holoceneConfig() + cfg.HoloceneTime = nil + testBuildPayloadError(t, cfg, + "got eip1559 params, expected none", + func(args *BuildPayloadArgs) { args.EIP1559Params = validEIP1559Params }) + }) + t.Run("holocene-no-params", func(t *testing.T) { + testBuildPayloadError(t, holoceneConfig(), + "holocene eip-1559 params should be 8 bytes, got 0", + func(args *BuildPayloadArgs) { args.EIP1559Params = nil }) + }) + t.Run("holocene-bad-params", func(t *testing.T) { + testBuildPayloadError(t, holoceneConfig(), + "holocene params cannot have a 0 denominator unless elasticity is also 0", + func(args *BuildPayloadArgs) { args.EIP1559Params = eip1559.EncodeHolocene1559Params(0, 6) }) + }) + t.Run("jovian-no-minbasefee", func(t *testing.T) { + testBuildPayloadError(t, jovianConfig(), "missing minBaseFee", + func(args *BuildPayloadArgs) { args.MinBaseFee = nil }) + }) +} + func TestDAFilters(t *testing.T) { // Each test case inserts one pending small (DA cost 100) transaction followed by // numDAFilterTxs transactions that have random calldata 
(min DA size >> 100)
@@ -203,50 +227,65 @@ func TestDAFilters(t *testing.T) {
 }
 
 func holoceneConfig() *params.ChainConfig {
-	config := *params.TestChainConfig
-	config.LondonBlock = big.NewInt(0)
-	t := uint64(0)
-	config.CanyonTime = &t
-	config.HoloceneTime = &t
-	canyonDenom := uint64(250)
-	config.Optimism = &params.OptimismConfig{
-		EIP1559Elasticity:        6,
-		EIP1559Denominator:       50,
-		EIP1559DenominatorCanyon: &canyonDenom,
-	}
+	config := *params.OptimismTestConfig
+	config.IsthmusTime = nil
+	config.JovianTime = nil
+	config.PragueTime = nil
+	config.OsakaTime = nil
 	return &config
 }
 
-func jovianConfig() *params.ChainConfig {
+func isthmusConfig() *params.ChainConfig {
 	config := holoceneConfig()
-	zero := uint64(0)
+	config.IsthmusTime = &zero
+	config.PragueTime = &zero
+	return config
+}
+
+func jovianConfig() *params.ChainConfig {
+	config := isthmusConfig()
 	config.JovianTime = &zero
 	return config
 }
 
-// newPayloadArgs returns a BuildPaylooadArgs with the given parentHash, eip-1559 params,
-// minBaseFee, testTimestamp for Timestamp, and testRecipient for recipient. NoTxPool is set to true.
-func newPayloadArgs(parentHash common.Hash, params1559 []byte, minBaseFee *uint64) *BuildPayloadArgs {
-	return &BuildPayloadArgs{
-		Parent:        parentHash,
-		Timestamp:     testTimestamp,
-		Random:        common.Hash{},
-		FeeRecipient:  testRecipient,
-		NoTxPool:      true,
-		EIP1559Params: params1559,
-		MinBaseFee:    minBaseFee,
+// newPayloadArgs returns valid BuildPayloadArgs for the given chain config with the given parentHash,
+// testTimestamp for Timestamp, and testRecipient for recipient.
+// OP-Stack chains will have one dummy deposit transaction in Transactions.
+// NoTxPool is set to true.
+// A test can modify individual fields afterwards to enable the transaction
+// pool, create invalid eip-1559 params, minBaseFee, etc.
+func newPayloadArgs(parentHash common.Hash, cfg *params.ChainConfig) *BuildPayloadArgs { + args := &BuildPayloadArgs{ + Parent: parentHash, + Timestamp: testTimestamp, + FeeRecipient: testRecipient, + Withdrawals: types.Withdrawals{}, + NoTxPool: true, } + + if !cfg.IsOptimism() { + return args + } + + if cfg.IsHolocene(args.Timestamp) { + args.EIP1559Params = validEIP1559Params + } + dtx := new(types.DepositTx) + if cfg.IsDAFootprintBlockLimit(args.Timestamp) { + dtx = jovianDepositTx(testDAFootprintGasScalar) + } + args.Transactions = []*types.Transaction{types.NewTx(dtx)} + if cfg.IsMinBaseFee(args.Timestamp) { + args.MinBaseFee = ptr(uint64(1e9)) + } + + return args } func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte, config *params.ChainConfig) { t.Parallel() db := rawdb.NewMemoryDatabase() - var minBaseFee *uint64 - if config.IsMinBaseFee(testTimestamp) { - val := uint64(1e9) - minBaseFee = &val - } w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0) const numInterruptTxs = 256 @@ -258,8 +297,9 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte, b.txPool.Add(txs, false) } - args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559, minBaseFee) + args := newPayloadArgs(b.chain.CurrentBlock().Hash(), config) args.NoTxPool = noTxPool + args.EIP1559Params = params1559 // payload resolution now interrupts block building, so we have to // wait for the payloading building process to build its first block @@ -269,6 +309,9 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte, } verify := func(outer *engine.ExecutionPayloadEnvelope, txs int) { t.Helper() + if config.IsOptimism() { + txs++ // account for dummy deposit tx + } if outer == nil { t.Fatal("ExecutionPayloadEnvelope is nil") } @@ -314,7 +357,7 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte, } if versionByte == eip1559.MinBaseFeeExtraDataVersionByte { buf := make([]byte, 
8) - binary.BigEndian.PutUint64(buf, *minBaseFee) + binary.BigEndian.PutUint64(buf, *args.MinBaseFee) expected = append(expected, buf...) } } @@ -341,7 +384,7 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte, if e != uint64(expectedElasticity) { t.Fatalf("elasticity doesn't match. want: %d, got %d", expectedElasticity, e) } - require.Equal(t, minBaseFee, extractedMinBaseFee, "minBaseFee doesn't match") + require.Equal(t, args.MinBaseFee, extractedMinBaseFee, "minBaseFee doesn't match") } if noTxPool { @@ -377,8 +420,7 @@ func testDAFilters(t *testing.T, maxDATxSize, maxDABlockSize *big.Int, expectedT txs := genTxs(1, numDAFilterTxs) b.txPool.Add(txs, false) - params1559 := []byte{0, 1, 2, 3, 4, 5, 6, 7} - args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559, &zero) + args := newPayloadArgs(b.chain.CurrentBlock().Hash(), config) args.NoTxPool = false payload, err := w.buildPayload(args, false) @@ -387,65 +429,25 @@ func testDAFilters(t *testing.T, maxDATxSize, maxDABlockSize *big.Int, expectedT } payload.WaitFull() result := payload.ResolveFull().ExecutionPayload - if len(result.Transactions) != expectedTxCount { + if len(result.Transactions) != expectedTxCount+1 { // account for dummy deposit tx t.Fatalf("Unexpected transaction set: got %d, expected %d", len(result.Transactions), expectedTxCount) } } -func testBuildPayloadWrongConfig(t *testing.T, params1559 []byte, config *params.ChainConfig) { +func testBuildPayloadError(t *testing.T, config *params.ChainConfig, expErrStr string, mod func(*BuildPayloadArgs)) { t.Parallel() db := rawdb.NewMemoryDatabase() - wrongConfig := *config - if len(params1559) != 0 { - // deactivate holocene and jovian and make sure non-empty params get rejected - wrongConfig.HoloceneTime = nil - wrongConfig.JovianTime = nil - } - w, b := newTestWorker(t, &wrongConfig, ethash.NewFaker(), db, 0) - - args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559, &zero) - payload, err := 
w.buildPayload(args, false) - if err == nil && (payload == nil || payload.err == nil) { - t.Fatalf("expected error, got none") - } -} - -func TestBuildPayloadInvalidHoloceneParams(t *testing.T) { - t.Parallel() - db := rawdb.NewMemoryDatabase() - config := holoceneConfig() - w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0) - - // 0 denominators shouldn't be allowed - badParams := eip1559.EncodeHolocene1559Params(0, 6) - - args := newPayloadArgs(b.chain.CurrentBlock().Hash(), badParams, &zero) - payload, err := w.buildPayload(args, false) - if err == nil && (payload == nil || payload.err == nil) { - t.Fatalf("expected error, got none") - } -} - -func TestBuildPayloadInvalidMinBaseFeeExtraData(t *testing.T) { - t.Parallel() - db := rawdb.NewMemoryDatabase() - config := jovianConfig() w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0) - // 0 denominators shouldn't be allowed - badParams := eip1559.EncodeMinBaseFeeExtraData(0, 6, 0) - - args := newPayloadArgs(b.chain.CurrentBlock().Hash(), badParams, &zero) + args := newPayloadArgs(b.chain.CurrentBlock().Hash(), config) + mod(args) payload, err := w.buildPayload(args, false) - if err == nil && (payload == nil || payload.err == nil) { - t.Fatalf("expected error, got none") - } - - // missing minBaseFee shouldn't be allowed (use Holocene encoder) - badParams = eip1559.EncodeHoloceneExtraData(250, 6) - args = newPayloadArgs(b.chain.CurrentBlock().Hash(), badParams, &zero) - payload, err = w.buildPayload(args, false) - if err == nil && (payload == nil || payload.err == nil) { + require.Nil(t, payload) + if err != nil { + require.ErrorContains(t, err, expErrStr) + } else if payload.err != nil { + require.ErrorContains(t, payload.err, expErrStr) + } else { t.Fatalf("expected error, got none") } } @@ -466,7 +468,7 @@ func genTxs(startNonce, count uint64) types.Transactions { Nonce: nonce, To: &testUserAddress, Value: big.NewInt(1000), - Gas: params.TxGas + uint64(len(randomBytes))*16, + Gas: params.TxGas + 
uint64(len(randomBytes))*40, GasPrice: big.NewInt(params.InitialBaseFee), Data: randomBytes, }) diff --git a/miner/worker.go b/miner/worker.go index 0c38788cf4..c98a69bb0e 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -74,6 +74,9 @@ type environment struct { coinbase common.Address evm *vm.EVM + // OP-Stack addition: DA footprint block limit + daFootprintGasScalar uint16 + header *types.Header txs []*types.Transaction receipts []*types.Receipt @@ -311,10 +314,13 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir } if genParams.gasLimit != nil { // override gas limit if specified header.GasLimit = *genParams.gasLimit - } else if miner.chain.Config().Optimism != nil && miner.config.GasCeil != 0 { + } else if miner.chain.Config().IsOptimism() && miner.config.GasCeil != 0 { // configure the gas limit of pending blocks with the miner gas limit config when using optimism header.GasLimit = miner.config.GasCeil } + if miner.chainConfig.IsMinBaseFee(header.Time) && genParams.minBaseFee == nil { + return nil, errors.New("missing minBaseFee") + } if cfg := miner.chainConfig; cfg.IsHolocene(header.Time) { if err := eip1559.ValidateHolocene1559Params(genParams.eip1559Params); err != nil { return nil, err @@ -355,6 +361,15 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir return nil, err } env.noTxs = genParams.noTxs + if miner.chainConfig.IsDAFootprintBlockLimit(parent.Time) { + if len(genParams.txs) == 0 || !genParams.txs[0].IsDepositTx() { + return nil, errors.New("missing L1 attributes deposit transaction") + } + env.daFootprintGasScalar, err = types.ExtractDAFootprintGasScalar(genParams.txs[0].Data()) + if err != nil { + return nil, err + } + } if header.ParentBeaconRoot != nil { core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm) } @@ -507,7 +522,12 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran if env.gasPool == nil { env.gasPool = 
new(core.GasPool).AddGas(gasLimit) } + + // OP-Stack additions: throttling and DA footprint limit blockDABytes := new(big.Int) + isJovian := miner.chainConfig.IsDAFootprintBlockLimit(env.header.Time) + minTransactionDAFootprint := types.MinTransactionSize.Uint64() * uint64(env.daFootprintGasScalar) + for { // Check interruption signal and abort building if it's fired. if interrupt != nil { @@ -520,6 +540,17 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) break } + + var daFootprintLeft uint64 + if isJovian { + daFootprintLeft = gasLimit - *env.header.BlobGasUsed + // If we don't have enough DA space for any further transactions then we're done. + if daFootprintLeft < minTransactionDAFootprint { + log.Debug("Not enough DA space for further transactions", "have", daFootprintLeft, "want", minTransactionDAFootprint) + break + } + } + // If we don't have enough blob space for any further blob transactions, // skip that list altogether if !blobTxs.Empty() && env.blobs >= eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) { @@ -569,6 +600,19 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran } } + // OP-Stack addition: Jovian DA footprint limit + var txDAFootprint uint64 + // Note that commitTransaction is only called after deposit transactions have already been committed, + // so we don't need to resolve the transaction here and exclude deposits. 
+ if isJovian { + txDAFootprint = ltx.DABytes.Uint64() * uint64(env.daFootprintGasScalar) + if daFootprintLeft < txDAFootprint { + log.Debug("Not enough DA space left for transaction", "hash", ltx.Hash, "left", daFootprintLeft, "needed", txDAFootprint) + txs.Pop() + continue + } + } + // OP-Stack addition: sequencer throttling daBytesAfter := new(big.Int) if ltx.DABytes != nil && miner.config.MaxDABlockSize != nil { @@ -641,6 +685,9 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran case errors.Is(err, nil): // Everything ok, collect the logs and shift in the next transaction from the same account blockDABytes = daBytesAfter + if isJovian { + *env.header.BlobGasUsed += txDAFootprint + } txs.Shift() default: diff --git a/params/config.go b/params/config.go index 084eddbe30..2ada8f0e61 100644 --- a/params/config.go +++ b/params/config.go @@ -68,11 +68,17 @@ var ( ShanghaiTime: newUint64(1681338455), CancunTime: newUint64(1710338135), PragueTime: newUint64(1746612311), + OsakaTime: newUint64(1764798551), + BPO1Time: newUint64(1765290071), + BPO2Time: newUint64(1767747671), DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"), Ethash: new(EthashConfig), BlobScheduleConfig: &BlobScheduleConfig{ Cancun: DefaultCancunBlobConfig, Prague: DefaultPragueBlobConfig, + Osaka: DefaultOsakaBlobConfig, + BPO1: DefaultBPO1BlobConfig, + BPO2: DefaultBPO2BlobConfig, }, } // HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network. 
@@ -377,6 +383,7 @@ var ( OptimismTestConfig = func() *ChainConfig { conf := *MergedTestChainConfig // copy the config conf.BlobScheduleConfig = nil + conf.OsakaTime = nil // needs to be removed when production fork introduces Osaka conf.BedrockBlock = big.NewInt(0) zero := uint64(0) conf.RegolithTime = &zero @@ -386,9 +393,9 @@ var ( conf.GraniteTime = &zero conf.HoloceneTime = &zero conf.IsthmusTime = &zero - conf.InteropTime = nil conf.JovianTime = nil - conf.Optimism = &OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10, EIP1559DenominatorCanyon: uint64ptr(250)} + conf.InteropTime = nil + conf.Optimism = &OptimismConfig{EIP1559Elasticity: 6, EIP1559Denominator: 50, EIP1559DenominatorCanyon: uint64ptr(250)} return &conf }() ) diff --git a/params/protocol_params.go b/params/protocol_params.go index 25c8c8a6a8..95f291e881 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -187,6 +187,11 @@ const ( Bls12381G2MulMaxInputSizeIsthmus uint64 = 488448 // Maximum input size for BLS12-381 G2 multiple-scalar-multiply operation Bls12381PairingMaxInputSizeIsthmus uint64 = 235008 // Maximum input size for BLS12-381 pairing check + Bn256PairingMaxInputSizeJovian uint64 = 81984 // bn256Pairing limit (427 pairs) + Bls12381G1MulMaxInputSizeJovian uint64 = 288960 // BLS12-381 G1 MSM limit (1,806 pairs) + Bls12381G2MulMaxInputSizeJovian uint64 = 278784 // BLS12-381 G2 MSM limit (968 pairs) + Bls12381PairingMaxInputSizeJovian uint64 = 156672 // BLS12-381 pairing limit (408 pairs) + // The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529, // up to half the consumed gas could be refunded. 
Redefined as 1/5th in EIP-3529 RefundQuotient uint64 = 2 diff --git a/superchain-registry-commit.txt b/superchain-registry-commit.txt index 5ce6d82034..b5243710e5 100644 --- a/superchain-registry-commit.txt +++ b/superchain-registry-commit.txt @@ -1 +1 @@ -d56233c1e5254fc2fd769d5b33269502a1fe9ef8 \ No newline at end of file +720185c32b0599fa31b14f101cbc990ec39c0a36 diff --git a/superchain/superchain-configs.zip b/superchain/superchain-configs.zip index c9eb8f0a12..c0317192e5 100644 Binary files a/superchain/superchain-configs.zip and b/superchain/superchain-configs.zip differ diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go index 4639932763..47a632fd37 100644 --- a/triedb/pathdb/journal.go +++ b/triedb/pathdb/journal.go @@ -333,7 +333,16 @@ func (db *Database) Journal(root common.Hash) error { if db.readOnly { return errDatabaseReadOnly } - + // Forcibly sync the ancient store before persisting the in-memory layers. + // This prevents an edge case where the in-memory layers are persisted + // but the ancient store is not properly closed, resulting in recent writes + // being lost. After a restart, the ancient store would then be misaligned + // with the disk layer, causing data corruption. + if db.stateFreezer != nil { + if err := db.stateFreezer.SyncAncient(); err != nil { + return err + } + } // Store the journal into the database and return var ( file *os.File