diff --git a/README.md b/README.md index bc08437dba6..b163d156771 100644 --- a/README.md +++ b/README.md @@ -89,9 +89,9 @@ If your chain is the one of `op-mainnet` & `op-sepolia`, or your chain is synced **[Optional]** Disables transaction pool gossiping. Though this is not required, it's useful to set this to true since transaction pool gossip is currently unsupported in the Optimism protocol. If not provided, default value is set to `false`. -### `--maxpeers=0`, `--nodiscover` +### `--maxpeers=0`, `--nodiscover`, `--v5disc=false` **[Optional]** -Disable P2P. Execution-layer peering is currently not supported in the Optimism protocol. Though this is not required, it saves resources since TX pool gossip is currently not available. +Disable P2P. This can save resources if you are only using op-node to sync the chain instead of using execution-layer syncing. ## Support Chains op-erigon supports every OP Stack chains listed in [superchain-registry](https://github.com/ethereum-optimism/superchain-registry). @@ -134,8 +134,7 @@ $ ./build/bin/erigon \ --rollup.historicalrpc="https://mainnet.optimism.io" \ --txpool.gossip.disable=true \ --chain=op-mainnet \ - --db.size.limit=8TB \ - --nodiscover + --db.size.limit=8TB ``` 2. Use the Docker image: You can get the official Docker image from [testinprod/op-erigon](https://hub.docker.com/r/testinprod/op-erigon). 3. Use the Helm chart: If you want to deploy op-erigon to the K8S cluster, you can use [Helm chart](https://artifacthub.io/packages/helm/op-charts/erigon). diff --git a/cl/beacon/beaconhttp/args.go b/cl/beacon/beaconhttp/args.go index 1701620b074..5bbb115171a 100644 --- a/cl/beacon/beaconhttp/args.go +++ b/cl/beacon/beaconhttp/args.go @@ -5,6 +5,7 @@ import ( "net/http" "regexp" "strconv" + "strings" "github.com/go-chi/chi/v5" "github.com/ledgerwatch/erigon-lib/common" @@ -168,9 +169,13 @@ func Uint64FromQueryParams(r *http.Request, name string) (*uint64, error) { // decode a list of strings from the query params func StringListFromQueryParams(r *http.Request, name string) ([]string, error) { - str := r.URL.Query().Get(name) - if str == "" { + values := r.URL.Query()[name] + if len(values) == 0 { return nil, nil } + + // Combine all values into a single string, separating by comma + str := strings.Join(values, ",") + return regexp.MustCompile(`\s*,\s*`).Split(str, -1), nil } diff --git a/cl/beacon/handler/blobs.go b/cl/beacon/handler/blobs.go index 18e11a98633..8b006ec4d4a 100644 --- a/cl/beacon/handler/blobs.go +++ b/cl/beacon/handler/blobs.go @@ -3,6 +3,7 @@ package handler import ( "fmt" "net/http" + "strconv" "github.com/ledgerwatch/erigon/cl/beacon/beaconhttp" "github.com/ledgerwatch/erigon/cl/cltypes" @@ -51,13 +52,33 @@ func (a *ApiHandler) GetEthV1BeaconBlobSidecars(w http.ResponseWriter, r *http.R if err != nil { return nil, err } - + strIdxs, err := beaconhttp.StringListFromQueryParams(r, "indices") + if err != nil { + return nil, err + } resp := solid.NewStaticListSSZ[*cltypes.BlobSidecar](696969, blobSidecarSSZLenght) if !found { return beaconhttp.NewBeaconResponse(resp), nil } - for _, v := range out { - resp.Append(v) + if len(strIdxs) == 0 { + for _, v := range out { + resp.Append(v) + } + } else { + included := make(map[uint64]struct{}) + for _, idx := range strIdxs { + i, err := strconv.ParseUint(idx, 10, 64) + if err != nil { + return nil, err + } + included[i] = struct{}{} + } + for _, v := range out { + if _, ok := included[v.Index]; ok { + resp.Append(v) + } + } } + return beaconhttp.NewBeaconResponse(resp), nil } diff 
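The `StringListFromQueryParams` and blob-sidecar hunks above make the Beacon API accept `indices` either as repeated query parameters (`?indices=0&indices=2`) or as a comma-separated list (`?indices=0,2`), and then filter the returned sidecars by index. A minimal standalone sketch of that parsing step; `parseIndices` is a hypothetical helper name, not the handler's actual wiring, which composes `StringListFromQueryParams` with `strconv.ParseUint` as the hunks show:

```go
package main

import (
	"fmt"
	"net/http"
	"regexp"
	"strconv"
	"strings"
)

// parseIndices (hypothetical) mirrors the combined behaviour: repeated query
// parameters are joined with commas and then split again, tolerating
// whitespace around the separators.
func parseIndices(r *http.Request, name string) ([]uint64, error) {
	values := r.URL.Query()[name]
	if len(values) == 0 {
		return nil, nil // no filter requested
	}
	parts := regexp.MustCompile(`\s*,\s*`).Split(strings.Join(values, ","), -1)
	out := make([]uint64, 0, len(parts))
	for _, p := range parts {
		i, err := strconv.ParseUint(p, 10, 64)
		if err != nil {
			return nil, err
		}
		out = append(out, i)
	}
	return out, nil
}

func main() {
	r, _ := http.NewRequest("GET", "/eth/v1/beacon/blob_sidecars/head?indices=0,1&indices=3", nil)
	idxs, err := parseIndices(r, "indices")
	fmt.Println(idxs, err) // [0 1 3] <nil>
}
```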
--git a/cl/clparams/config.go b/cl/clparams/config.go index 20ef21434a9..e5980ab3cca 100644 --- a/cl/clparams/config.go +++ b/cl/clparams/config.go @@ -15,6 +15,7 @@ package clparams import ( "crypto/rand" + "encoding/binary" "fmt" "math" "math/big" @@ -315,7 +316,9 @@ func (b ConfigByte) MarshalJSON() ([]byte, error) { type ConfigForkVersion uint32 func (v ConfigForkVersion) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("\"0x%x\"", v)), nil + tmp := make([]byte, 4) + binary.BigEndian.PutUint32(tmp, uint32(v)) + return []byte(fmt.Sprintf("\"0x%x\"", tmp)), nil } // BeaconChainConfig contains constant configs for node to participate in beacon chain. diff --git a/cl/sentinel/service/service.go b/cl/sentinel/service/service.go index 0c52f9edc23..1bf9654fb53 100644 --- a/cl/sentinel/service/service.go +++ b/cl/sentinel/service/service.go @@ -96,7 +96,8 @@ func (s *SentinelServer) PublishGossip(_ context.Context, msg *sentinelrpc.Gossi gossip.TopicNameVoluntaryExit, gossip.TopicNameProposerSlashing, gossip.TopicNameSyncCommitteeContributionAndProof, - gossip.TopicNameAttesterSlashing: + gossip.TopicNameAttesterSlashing, + gossip.TopicNameBlsToExecutionChange: subscription = manager.GetMatchingSubscription(msg.Name) default: // check subnets diff --git a/cl/spectest/Makefile b/cl/spectest/Makefile index 68959bfba32..b507c4613b1 100644 --- a/cl/spectest/Makefile +++ b/cl/spectest/Makefile @@ -2,7 +2,7 @@ tests: - GIT_LFS_SKIP_SMUDGE=1 git clone https://github.com/ethereum/consensus-spec-tests + GIT_LFS_SKIP_SMUDGE=1 GIT_CLONE_PROTECTION_ACTIVE=false git clone https://github.com/ethereum/consensus-spec-tests cd consensus-spec-tests && git checkout 080c96fbbf3be58e75947debfeb9ba3b2b7c9748 && git lfs pull --exclude=tests/general,tests/minimal && cd .. mv consensus-spec-tests/tests . 
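Returning to the `ConfigForkVersion.MarshalJSON` hunk above: formatting the `uint32` directly with `%x` drops leading zeroes, so a fork version such as `0x04000000` would serialize as `"0x4000000"`. Encoding the value as a big-endian 4-byte slice keeps the canonical fixed-width form. A small self-contained check of the difference:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	v := uint32(0x04000000) // a typical fork version

	// old behaviour: %x on the integer, the leading zero is lost
	fmt.Printf("\"0x%x\"\n", v) // "0x4000000"

	// new behaviour: %x on a big-endian 4-byte slice, fixed width
	tmp := make([]byte, 4)
	binary.BigEndian.PutUint32(tmp, v)
	fmt.Printf("\"0x%x\"\n", tmp) // "0x04000000"
}
```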
rm -rf consensus-spec-tests diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index d3202bf01ac..33daa9866f0 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -1621,6 +1621,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, chainConfig, genesisBlock, chainConfig.ChainID.Uint64(), + logger, ) maxBlockBroadcastPeers := func(header *types.Header) uint { return 0 } diff --git a/cmd/snapshots/manifest/manifest.go b/cmd/snapshots/manifest/manifest.go index 7ce76cc561a..fe36be79c27 100644 --- a/cmd/snapshots/manifest/manifest.go +++ b/cmd/snapshots/manifest/manifest.go @@ -12,12 +12,13 @@ import ( "strings" "time" + "github.com/urfave/cli/v2" + "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon/cmd/snapshots/sync" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/turbo/logging" - "github.com/urfave/cli/v2" ) var ( @@ -286,7 +287,7 @@ func verifyManifest(ctx context.Context, srcSession *downloader.RCloneSession, v var extra string if len(manifestFiles) != 0 { - files := make([]string, len(manifestFiles)) + files := make([]string, 0, len(manifestFiles)) for file := range manifestFiles { files = append(files, file) @@ -296,7 +297,7 @@ func verifyManifest(ctx context.Context, srcSession *downloader.RCloneSession, v } if len(dirFiles) != 0 { - files := make([]string, len(dirFiles)) + files := make([]string, 0, len(dirFiles)) for file := range dirFiles { files = append(files, file) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7c46dd795aa..abba345258e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -65,6 +65,7 @@ import ( "github.com/ledgerwatch/erigon/p2p/nat" "github.com/ledgerwatch/erigon/p2p/netutil" "github.com/ledgerwatch/erigon/params" + borsnaptype "github.com/ledgerwatch/erigon/polygon/bor/snaptype" "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/logging" ) @@ -627,11 +628,12 @@ var ( } NoDiscoverFlag = cli.BoolFlag{ Name: "nodiscover", - Usage: "Disables the peer discovery mechanism (manual peer addition)", + Usage: "Disables the v4 peer discovery mechanism (manual peer addition). Refer to --v5disc to configure v5 discovery protocol", } DiscoveryV5Flag = cli.BoolFlag{ Name: "v5disc", - Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism", + Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism (enabled by default). 
Use --v5disc=false to disable it", + Value: true, } NetrestrictFlag = cli.StringFlag{ Name: "netrestrict", @@ -1076,9 +1078,14 @@ var ( Usage: "Diagnostics HTTP server listening port", Value: 6060, } + DiagSpeedTestFlag = cli.BoolFlag{ + Name: "diagnostics.speedtest", + Usage: "Enable speed test", + Value: false, + } ) -var MetricFlags = []cli.Flag{&MetricsEnabledFlag, &MetricsHTTPFlag, &MetricsPortFlag, &DiagDisabledFlag, &DiagEndpointAddrFlag, &DiagEndpointPortFlag} +var MetricFlags = []cli.Flag{&MetricsEnabledFlag, &MetricsHTTPFlag, &MetricsPortFlag, &DiagDisabledFlag, &DiagEndpointAddrFlag, &DiagEndpointPortFlag, &DiagSpeedTestFlag} var DiagnosticsFlags = []cli.Flag{&DiagnosticsURLFlag, &DiagnosticsURLFlag, &DiagnosticsSessionsFlag} @@ -1658,6 +1665,7 @@ func setBorConfig(ctx *cli.Context, cfg *ethconfig.Config) { cfg.WithoutHeimdall = ctx.Bool(WithoutHeimdallFlag.Name) cfg.WithHeimdallMilestones = ctx.Bool(WithHeimdallMilestones.Name) cfg.WithHeimdallWaypointRecording = ctx.Bool(WithHeimdallWaypoints.Name) + borsnaptype.RecordWayPoints(cfg.WithHeimdallWaypointRecording) cfg.PolygonSync = ctx.Bool(PolygonSyncFlag.Name) } diff --git a/core/blockchain.go b/core/blockchain.go index fcdfb4de0dc..24424c914b1 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -217,7 +217,7 @@ func logReceipts(receipts types.Receipts, txns types.Transactions, cc *chain.Con return } - marshalled := make([]map[string]interface{}, len(receipts)) + marshalled := make([]map[string]interface{}, 0, len(receipts)) for i, receipt := range receipts { txn := txns[i] marshalled = append(marshalled, ethutils.MarshalReceipt(receipt, txn, cc, header, txn.Hash(), true)) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 4cb668adc31..51f747e1f00 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -21,7 +21,6 @@ import ( "context" "encoding/binary" "encoding/json" - "errors" "fmt" "math" "math/big" @@ -43,7 +42,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb/cbor" - "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/rlp" ) @@ -1097,116 +1095,6 @@ func PruneBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error { return nil } -// PruneBorBlocks - delete [1, to) old blocks after moving it to snapshots. 
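Several hunks in this diff (cmd/snapshots/manifest and core/blockchain.go above, diagnostics/logs.go further down) fix the same slip: allocating a slice with `make([]T, n)` and then `append`ing to it, which leaves `n` zero values at the front. Preallocating capacity instead of length is the intended form. A quick illustration, standalone and not taken from the patch:

```go
package main

import "fmt"

func main() {
	names := []string{"a.seg", "b.seg"}

	wrong := make([]string, len(names)) // length 2: already holds two "" entries
	for _, n := range names {
		wrong = append(wrong, n)
	}
	fmt.Println(len(wrong), wrong) // 4 [  a.seg b.seg]

	right := make([]string, 0, len(names)) // length 0, capacity 2
	for _, n := range names {
		right = append(right, n)
	}
	fmt.Println(len(right), right) // 2 [a.seg b.seg]
}
```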
-// keeps genesis in db: [1, to) -// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs -// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty -func PruneBorBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int, SpanIdAt func(number uint64) uint64) error { - c, err := tx.Cursor(kv.BorEventNums) - if err != nil { - return err - } - defer c.Close() - var blockNumBytes [8]byte - binary.BigEndian.PutUint64(blockNumBytes[:], blockTo) - k, v, err := c.Seek(blockNumBytes[:]) - if err != nil { - return err - } - var eventIdTo uint64 = math.MaxUint64 - if k != nil { - eventIdTo = binary.BigEndian.Uint64(v) - } - c1, err := tx.RwCursor(kv.BorEvents) - if err != nil { - return err - } - defer c1.Close() - counter := blocksDeleteLimit - for k, _, err = c1.First(); err == nil && k != nil && counter > 0; k, _, err = c1.Next() { - eventId := binary.BigEndian.Uint64(k) - if eventId >= eventIdTo { - break - } - if err = c1.DeleteCurrent(); err != nil { - return err - } - counter-- - } - if err != nil { - return err - } - firstSpanToKeep := SpanIdAt(blockTo) - c2, err := tx.RwCursor(kv.BorSpans) - if err != nil { - return err - } - defer c2.Close() - counter = blocksDeleteLimit - for k, _, err := c2.First(); err == nil && k != nil && counter > 0; k, _, err = c2.Next() { - spanId := binary.BigEndian.Uint64(k) - if spanId >= firstSpanToKeep { - break - } - if err = c2.DeleteCurrent(); err != nil { - return err - } - counter-- - } - - checkpointCursor, err := tx.RwCursor(kv.BorCheckpoints) - if err != nil { - return err - } - - defer checkpointCursor.Close() - lastCheckpointToRemove, err := heimdall.CheckpointIdAt(tx, blockTo) - - if err != nil { - return err - } - - var checkpointIdBytes [8]byte - binary.BigEndian.PutUint64(checkpointIdBytes[:], uint64(lastCheckpointToRemove)) - for k, _, err := checkpointCursor.Seek(checkpointIdBytes[:]); err == nil && k != nil; k, _, err = checkpointCursor.Prev() { - if err = checkpointCursor.DeleteCurrent(); err != nil { - return err - } - } - - milestoneCursor, err := tx.RwCursor(kv.BorMilestones) - - if err != nil { - return err - } - - defer milestoneCursor.Close() - - var lastMilestoneToRemove heimdall.MilestoneId - - for blockCount := 1; err != nil && blockCount < blocksDeleteLimit; blockCount++ { - lastMilestoneToRemove, err = heimdall.MilestoneIdAt(tx, blockTo-uint64(blockCount)) - - if !errors.Is(err, heimdall.ErrMilestoneNotFound) { - return err - } else { - if blockCount == blocksDeleteLimit-1 { - return nil - } - } - } - - var milestoneIdBytes [8]byte - binary.BigEndian.PutUint64(milestoneIdBytes[:], uint64(lastMilestoneToRemove)) - for k, _, err := milestoneCursor.Seek(milestoneIdBytes[:]); err == nil && k != nil; k, _, err = milestoneCursor.Prev() { - if err = milestoneCursor.DeleteCurrent(); err != nil { - return err - } - } - - return nil -} - func TruncateCanonicalChain(ctx context.Context, db kv.RwTx, from uint64) error { return db.ForEach(kv.HeaderCanonical, hexutility.EncodeTs(from), func(k, _ []byte) error { return db.Delete(kv.HeaderCanonical, k) diff --git a/core/rawdb/blockio/block_writer.go b/core/rawdb/blockio/block_writer.go index e1e779cc0a8..52838b47fd1 100644 --- a/core/rawdb/blockio/block_writer.go +++ b/core/rawdb/blockio/block_writer.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/polygon/bor/bordb" "github.com/ledgerwatch/log/v3" ) @@ -116,5 +117,5 @@ func (w 
*BlockWriter) PruneBlocks(ctx context.Context, tx kv.RwTx, blockTo uint6 // doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs // doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty func (w *BlockWriter) PruneBorBlocks(ctx context.Context, tx kv.RwTx, blockTo uint64, blocksDeleteLimit int, SpanIdAt func(number uint64) uint64) error { - return rawdb.PruneBorBlocks(tx, blockTo, blocksDeleteLimit, SpanIdAt) + return bordb.PruneBorBlocks(tx, blockTo, blocksDeleteLimit, SpanIdAt) } diff --git a/core/vm/evm.go b/core/vm/evm.go index 88db712c40a..ca7832bc43e 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -227,6 +227,12 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp v := value if typ == STATICCALL { v = nil + } else if typ == DELEGATECALL { + // NOTE: caller must, at all times be a contract. It should never happen + // that caller is something other than a Contract. + parent := caller.(*Contract) + // DELEGATECALL inherits value from parent call + v = parent.value } if depth == 0 { evm.config.Tracer.CaptureStart(evm, caller.Address(), addr, isPrecompile, false /* create */, input, gas, v, code) diff --git a/diagnostics/logs.go b/diagnostics/logs.go index 420fa2c926a..f3d44e5c178 100644 --- a/diagnostics/logs.go +++ b/diagnostics/logs.go @@ -70,7 +70,7 @@ func writeLogsList(w http.ResponseWriter, dirPath string) { Size int64 `json:"size"` } - files := make([]file, len(infos)) + files := make([]file, 0, len(infos)) for _, fileInfo := range infos { files = append(files, file{Name: fileInfo.Name(), Size: fileInfo.Size()}) diff --git a/diagnostics/setup.go b/diagnostics/setup.go index 7ce8b3a6d53..6534ecac245 100644 --- a/diagnostics/setup.go +++ b/diagnostics/setup.go @@ -20,6 +20,7 @@ var ( metricsPortFlag = "metrics.port" pprofPortFlag = "pprof.port" pprofAddrFlag = "pprof.addr" + diagnoticsSpeedTestFlag = "diagnostics.speedtest" ) func Setup(ctx *cli.Context, node *node.ErigonNode, metricsMux *http.ServeMux, pprofMux *http.ServeMux) { @@ -48,7 +49,9 @@ func Setup(ctx *cli.Context, node *node.ErigonNode, metricsMux *http.ServeMux, p diagMux = SetupDiagnosticsEndpoint(nil, diagAddress) } - diagnostic := diaglib.NewDiagnosticClient(diagMux, node.Backend().DataDir()) + speedTest := ctx.Bool(diagnoticsSpeedTestFlag) + + diagnostic := diaglib.NewDiagnosticClient(diagMux, node.Backend().DataDir(), speedTest) diagnostic.Setup() SetupEndpoints(ctx, node, diagMux, diagnostic) diff --git a/erigon-lib/diagnostics/client.go b/erigon-lib/diagnostics/client.go index 284e46c1498..df5b04a76fc 100644 --- a/erigon-lib/diagnostics/client.go +++ b/erigon-lib/diagnostics/client.go @@ -10,6 +10,7 @@ import ( type DiagnosticClient struct { metricsMux *http.ServeMux dataDirPath string + speedTest bool syncStats SyncStatistics snapshotFileList SnapshoFilesList @@ -26,10 +27,11 @@ type DiagnosticClient struct { networkSpeedMutex sync.Mutex } -func NewDiagnosticClient(metricsMux *http.ServeMux, dataDirPath string) *DiagnosticClient { +func NewDiagnosticClient(metricsMux *http.ServeMux, dataDirPath string, speedTest bool) *DiagnosticClient { return &DiagnosticClient{ metricsMux: metricsMux, dataDirPath: dataDirPath, + speedTest: speedTest, syncStats: SyncStatistics{}, hardwareInfo: HardwareInfo{}, snapshotFileList: SnapshoFilesList{}, diff --git a/erigon-lib/diagnostics/speedtest.go b/erigon-lib/diagnostics/speedtest.go index d2c463bbbbb..77da08ac7a6 100644 --- a/erigon-lib/diagnostics/speedtest.go +++ b/erigon-lib/diagnostics/speedtest.go @@ -8,22 +8,11 @@ 
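The `core/vm/evm.go` hunk above changes what value is reported to tracers: a DELEGATECALL frame executes the callee's code in the caller's context, so it carries the parent frame's value rather than one of its own, while STATICCALL carries none. That is why the call-tracer test fixtures later in this diff gain explicit `"value":"0x0"` fields on DELEGATECALL entries. A hedged sketch of the rule; `tracedValue` is a hypothetical helper, not erigon's own types:

```go
package main

import (
	"fmt"
	"math/big"
)

// tracedValue (hypothetical) expresses which value a tracer should see
// for a given call opcode.
func tracedValue(op string, parentValue, callValue *big.Int) *big.Int {
	switch op {
	case "STATICCALL":
		return nil // static calls can never transfer value
	case "DELEGATECALL":
		return parentValue // inherited from the calling frame
	default:
		return callValue // CALL / CALLCODE pass their own value
	}
}

func main() {
	fmt.Println(tracedValue("DELEGATECALL", big.NewInt(0), nil))   // 0
	fmt.Println(tracedValue("CALL", big.NewInt(0), big.NewInt(5))) // 5
}
```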
import ( ) func (d *DiagnosticClient) setupSpeedtestDiagnostics(rootCtx context.Context) { - ticker := time.NewTicker(30 * time.Minute) go func() { - d.networkSpeedMutex.Lock() - d.networkSpeed = d.runSpeedTest(rootCtx) - d.networkSpeedMutex.Unlock() - - for { - select { - case <-ticker.C: - d.networkSpeedMutex.Lock() - d.networkSpeed = d.runSpeedTest(rootCtx) - d.networkSpeedMutex.Unlock() - case <-rootCtx.Done(): - ticker.Stop() - return - } + if d.speedTest { + d.networkSpeedMutex.Lock() + d.networkSpeed = d.runSpeedTest(rootCtx) + d.networkSpeedMutex.Unlock() } }() } diff --git a/erigon-lib/txpool/pool.go b/erigon-lib/txpool/pool.go index d5b3d793773..6813edfef20 100644 --- a/erigon-lib/txpool/pool.go +++ b/erigon-lib/txpool/pool.go @@ -943,10 +943,8 @@ func (p *TxPool) validateTx(txn *types.TxSlot, isLocal bool, stateCache kvcache. } isShanghai := p.isShanghai() || p.isAgra() - if isShanghai { - if txn.DataLen > fixedgas.MaxInitCodeSize { - return txpoolcfg.InitCodeTooLarge - } + if isShanghai && txn.Creation && txn.DataLen > fixedgas.MaxInitCodeSize { + return txpoolcfg.InitCodeTooLarge // EIP-3860 } if txn.Type == types.BlobTxType { if !p.isCancun() { diff --git a/erigon-lib/txpool/pool_test.go b/erigon-lib/txpool/pool_test.go index c36f2640d02..6af1515c46a 100644 --- a/erigon-lib/txpool/pool_test.go +++ b/erigon-lib/txpool/pool_test.go @@ -645,26 +645,43 @@ func TestShanghaiValidateTx(t *testing.T) { expected txpoolcfg.DiscardReason dataLen int isShanghai bool + creation bool }{ "no shanghai": { expected: txpoolcfg.Success, dataLen: 32, isShanghai: false, + creation: true, }, "shanghai within bounds": { expected: txpoolcfg.Success, dataLen: 32, isShanghai: true, + creation: true, }, - "shanghai exactly on bound": { + "shanghai exactly on bound - create tx": { expected: txpoolcfg.Success, dataLen: fixedgas.MaxInitCodeSize, isShanghai: true, + creation: true, }, - "shanghai one over bound": { + "shanghai one over bound - create tx": { expected: txpoolcfg.InitCodeTooLarge, dataLen: fixedgas.MaxInitCodeSize + 1, isShanghai: true, + creation: true, + }, + "shanghai exactly on bound - calldata tx": { + expected: txpoolcfg.Success, + dataLen: fixedgas.MaxInitCodeSize, + isShanghai: true, + creation: false, + }, + "shanghai one over bound - calldata tx": { + expected: txpoolcfg.Success, + dataLen: fixedgas.MaxInitCodeSize + 1, + isShanghai: true, + creation: false, }, } @@ -700,7 +717,7 @@ func TestShanghaiValidateTx(t *testing.T) { FeeCap: *uint256.NewInt(21000), Gas: 500000, SenderID: 0, - Creation: true, + Creation: test.creation, } txns := types.TxSlots{ diff --git a/eth/backend.go b/eth/backend.go index e017b510a8d..9e057724515 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -590,6 +590,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger chainConfig, genesis, backend.config.NetworkID, + logger, ) // limit "new block" broadcasts to at most 10 random peers at time @@ -1694,7 +1695,7 @@ func (s *Ethereum) DataDir() string { // setBorDefaultMinerGasPrice enforces Miner.GasPrice to be equal to BorDefaultMinerGasPrice (30gwei by default) func setBorDefaultMinerGasPrice(chainConfig *chain.Config, config *ethconfig.Config, logger log.Logger) { - if chainConfig.Bor != nil && config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(ethconfig.BorDefaultMinerGasPrice) != 0 { + if chainConfig.Bor != nil && (config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(ethconfig.BorDefaultMinerGasPrice) != 0) { logger.Warn("Sanitizing invalid bor miner gas price", 
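The txpool hunk above narrows the EIP-3860 check: the init-code size limit only applies to contract-creation transactions, so a plain call carrying more than `MaxInitCodeSize` bytes of calldata must not be rejected, which the new "calldata tx" test cases exercise. A small sketch of the rule, assuming the EIP-3860 limit of 49152 bytes (2 × 24576); `rejectInitCodeTooLarge` is a hypothetical stand-in for the pool's check:

```go
package main

import "fmt"

const maxInitCodeSize = 49152 // EIP-3860: 2 * MAX_CODE_SIZE

// rejectInitCodeTooLarge mirrors the corrected predicate: only creation
// transactions are bounded by the init-code size limit after Shanghai.
func rejectInitCodeTooLarge(isShanghai, creation bool, dataLen int) bool {
	return isShanghai && creation && dataLen > maxInitCodeSize
}

func main() {
	fmt.Println(rejectInitCodeTooLarge(true, true, maxInitCodeSize+1))  // true  – create tx over the bound
	fmt.Println(rejectInitCodeTooLarge(true, false, maxInitCodeSize+1)) // false – calldata-only tx is fine
	fmt.Println(rejectInitCodeTooLarge(false, true, maxInitCodeSize+1)) // false – pre-Shanghai
}
```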
"provided", config.Miner.GasPrice, "updated", ethconfig.BorDefaultMinerGasPrice) config.Miner.GasPrice = ethconfig.BorDefaultMinerGasPrice } diff --git a/eth/ethutils/receipt.go b/eth/ethutils/receipt.go index f29886fe079..964019735d6 100644 --- a/eth/ethutils/receipt.go +++ b/eth/ethutils/receipt.go @@ -55,11 +55,11 @@ func MarshalReceipt( } if !chainConfig.IsLondon(header.Number.Uint64()) { - fields["effectiveGasPrice"] = hexutil.Uint64(txn.GetPrice().Uint64()) + fields["effectiveGasPrice"] = (*hexutil.Big)(txn.GetPrice().ToBig()) } else { baseFee, _ := uint256.FromBig(header.BaseFee) gasPrice := new(big.Int).Add(header.BaseFee, txn.GetEffectiveGasTip(baseFee).ToBig()) - fields["effectiveGasPrice"] = hexutil.Uint64(gasPrice.Uint64()) + fields["effectiveGasPrice"] = (*hexutil.Big)(gasPrice) } // Assign receipt status. @@ -101,7 +101,7 @@ func MarshalReceipt( if err != nil { log.Error(err.Error()) } - fields["blobGasPrice"] = blobGasPrice + fields["blobGasPrice"] = (*hexutil.Big)(blobGasPrice.ToBig()) fields["blobGasUsed"] = hexutil.Uint64(misc.GetBlobGasUsed(numBlobs)) } } diff --git a/eth/stagedsync/stage_bor_heimdall.go b/eth/stagedsync/stage_bor_heimdall.go index 850217b1f7a..07fc2ce5bf2 100644 --- a/eth/stagedsync/stage_bor_heimdall.go +++ b/eth/stagedsync/stage_bor_heimdall.go @@ -30,6 +30,7 @@ import ( "github.com/ledgerwatch/erigon/polygon/bor/borcfg" "github.com/ledgerwatch/erigon/polygon/bor/finality" "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" + borsnaptype "github.com/ledgerwatch/erigon/polygon/bor/snaptype" "github.com/ledgerwatch/erigon/polygon/bor/valset" "github.com/ledgerwatch/erigon/polygon/heimdall" "github.com/ledgerwatch/erigon/turbo/services" @@ -890,63 +891,67 @@ func BorHeimdallUnwind(u *UnwindState, ctx context.Context, _ *StageState, tx kv } // Removing checkpoints - if len(cfg.unwindTypes) == 0 || slices.Contains(cfg.unwindTypes, "checkpoints") { - checkpointCursor, err := tx.RwCursor(kv.BorCheckpoints) + if borsnaptype.CheckpointsEnabled() { + if len(cfg.unwindTypes) == 0 || slices.Contains(cfg.unwindTypes, "checkpoints") { + checkpointCursor, err := tx.RwCursor(kv.BorCheckpoints) - if err != nil { - return err - } - - defer checkpointCursor.Close() - lastCheckpointToKeep, err := heimdall.CheckpointIdAt(tx, u.UnwindPoint) - hasCheckpoints := true - - if err != nil { - if !errors.Is(err, heimdall.ErrCheckpointNotFound) { + if err != nil { return err } - hasCheckpoints = false - } + defer checkpointCursor.Close() + lastCheckpointToKeep, err := heimdall.CheckpointIdAt(tx, u.UnwindPoint) + hasCheckpoints := true - if hasCheckpoints { - var checkpointIdBytes [8]byte - binary.BigEndian.PutUint64(checkpointIdBytes[:], uint64(lastCheckpointToKeep+1)) - for k, _, err := checkpointCursor.Seek(checkpointIdBytes[:]); err == nil && k != nil; k, _, err = checkpointCursor.Next() { - if err = checkpointCursor.DeleteCurrent(); err != nil { + if err != nil { + if !errors.Is(err, heimdall.ErrCheckpointNotFound) { return err } - } - } - } - // Removing milestones - if len(cfg.unwindTypes) == 0 || slices.Contains(cfg.unwindTypes, "milestones") { - milestoneCursor, err := tx.RwCursor(kv.BorMilestones) + hasCheckpoints = false + } - if err != nil { - return err + if hasCheckpoints { + var checkpointIdBytes [8]byte + binary.BigEndian.PutUint64(checkpointIdBytes[:], uint64(lastCheckpointToKeep+1)) + for k, _, err := checkpointCursor.Seek(checkpointIdBytes[:]); err == nil && k != nil; k, _, err = checkpointCursor.Next() { + if err = checkpointCursor.DeleteCurrent(); 
err != nil { + return err + } + } + } } + } - defer milestoneCursor.Close() - lastMilestoneToKeep, err := heimdall.MilestoneIdAt(tx, u.UnwindPoint) - hasMilestones := true + if borsnaptype.MilestonesEnabled() { + // Removing milestones + if len(cfg.unwindTypes) == 0 || slices.Contains(cfg.unwindTypes, "milestones") { + milestoneCursor, err := tx.RwCursor(kv.BorMilestones) - if err != nil { - if !errors.Is(err, heimdall.ErrMilestoneNotFound) { + if err != nil { return err } - hasMilestones = false - } + defer milestoneCursor.Close() + lastMilestoneToKeep, err := heimdall.MilestoneIdAt(tx, u.UnwindPoint) + hasMilestones := true - if hasMilestones { - var milestoneIdBytes [8]byte - binary.BigEndian.PutUint64(milestoneIdBytes[:], uint64(lastMilestoneToKeep+1)) - for k, _, err := milestoneCursor.Seek(milestoneIdBytes[:]); err == nil && k != nil; k, _, err = milestoneCursor.Next() { - if err = milestoneCursor.DeleteCurrent(); err != nil { + if err != nil { + if !errors.Is(err, heimdall.ErrMilestoneNotFound) { return err } + + hasMilestones = false + } + + if hasMilestones { + var milestoneIdBytes [8]byte + binary.BigEndian.PutUint64(milestoneIdBytes[:], uint64(lastMilestoneToKeep+1)) + for k, _, err := milestoneCursor.Seek(milestoneIdBytes[:]); err == nil && k != nil; k, _, err = milestoneCursor.Next() { + if err = milestoneCursor.DeleteCurrent(); err != nil { + return err + } + } } } } diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json index 9ba67a9bf46..eb7539d4613 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json @@ -70,7 +70,8 @@ "input": "0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", "output": "0x0000000000000000000000000000000000000000000000000000000000000001", "to": "0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value":"0x0" } ], "from": "0x269296dddce321a6bcbaa2f0181127593d732cba", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json index bc13bc25068..e9ad6df6ab8 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json @@ -163,7 +163,8 @@ "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", "input": "0x24d4e90a0000000000000000000000000000000000000000000000020000000000000000", "output": "0x000000000000000000000000000000000000000000000000b17217f7d1cf79ab", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value":"0x0" }, { "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", @@ -172,7 +173,8 @@ "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", "input": "0x872fb2b5000000000000000000000000000000000000000000000000c330b3f7006420b8", "output": "0x00000000000000000000000000000000000000000000000224bf7df2c80f0878", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value":"0x0" }, { "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", @@ -181,7 +183,8 @@ "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", "input": "0x872fb2b50000000000000000000000000000000000000000000000000000000000000000", "output": "0x00000000000000000000000000000000000000000000000100000016aee6e8ef", - "type": "DELEGATECALL" + 
"type": "DELEGATECALL", + "value":"0x0" }, { "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", @@ -190,7 +193,8 @@ "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", "input": "0x24d4e90a00000000000000000000000000000000000000000000000324bf7e0976f5f167", "output": "0x0000000000000000000000000000000000000000000000012535c5e5f87ee0d2", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value":"0x0" }, { "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", @@ -199,7 +203,8 @@ "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", "input": "0x872fb2b5000000000000000000000000000000000000000000000000c330b3f7006420b8", "output": "0x00000000000000000000000000000000000000000000000224bf7df2c80f0878", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value":"0x0" }, { "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", @@ -208,7 +213,8 @@ "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", "input": "0x872fb2b500000000000000000000000000000000000000000000000237d37fe5d297a500", "output": "0x0000000000000000000000000000000000000000000000093088c407fcbbce38", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value":"0x0" }, { "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", @@ -217,7 +223,8 @@ "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", "input": "0x24d4e90a00000000000000000000000000000000000000000000000b554841fac4cad6b0", "output": "0x0000000000000000000000000000000000000000000000026d7fc130d6a74cbe", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value":"0x0" } ], "value": "0x0", @@ -390,7 +397,8 @@ "data": "0x000000000000000000000000000000000000000000000000de0b6b3a76400000" } ], - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value": "0x0" } ], "value": "0x0", diff --git a/go.mod b/go.mod index b6f431d2671..0407bab2e1c 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf github.com/edsrzf/mmap-go v1.1.0 github.com/emicklei/dot v1.6.1 - github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240522134500-19555bdbdc95 + github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240603085035-9c8f6081266e github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 github.com/gfx-labs/sse v0.0.0-20231226060816-f747e26a9baa diff --git a/go.sum b/go.sum index 8bd47ce6746..8bd1d3c0339 100644 --- a/go.sum +++ b/go.sum @@ -274,8 +274,8 @@ github.com/erigontech/silkworm-go v0.18.0 h1:j56p61xZHBFhZGH1OixlGU8KcfjHzcw9pjA github.com/erigontech/silkworm-go v0.18.0/go.mod h1:O50ux0apICEVEGyRWiE488K8qz8lc3PA/SXbQQAc8SU= github.com/erigontech/torrent v1.54.2-alpha-8 h1:MQobu6sUZCFbmWpsB7GqAh0IWs7VAZ370POaVxlApIk= github.com/erigontech/torrent v1.54.2-alpha-8/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240522134500-19555bdbdc95 h1:GjXKQg6u6WkEIcY0dvW2IKhMRY8cVjwdw+rNKhduAo8= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240522134500-19555bdbdc95/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240603085035-9c8f6081266e h1:PJWaF/45dMhO7xdzSwuZmwIIBwnqnPr84oFNmmnpGNs= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240603085035-9c8f6081266e/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0= github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= github.com/fjl/gencodec 
v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= diff --git a/migrations/prohibit_new_downloads2.go b/migrations/prohibit_new_downloads2.go index 22cd00372a9..e278fa71113 100644 --- a/migrations/prohibit_new_downloads2.go +++ b/migrations/prohibit_new_downloads2.go @@ -45,7 +45,7 @@ var ProhibitNewDownloadsLock2 = Migration{ locked = append(locked, t.Name()) } - for _, t := range borsnaptype.BorSnapshotTypes { + for _, t := range borsnaptype.BorSnapshotTypes() { locked = append(locked, t.Name()) } diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client.go b/p2p/sentry/sentry_multi_client/sentry_multi_client.go index 5754e57a2ee..69c50ac9060 100644 --- a/p2p/sentry/sentry_multi_client/sentry_multi_client.go +++ b/p2p/sentry/sentry_multi_client/sentry_multi_client.go @@ -156,9 +156,7 @@ func SentryReconnectAndPumpStreamLoop[TMessage interface{}]( statusData, err := statusDataFactory(ctx) if err != nil { - if !errors.Is(err, sentry.ErrNoHead) { - logger.Error("SentryReconnectAndPumpStreamLoop: statusDataFactory error", "stream", streamName, "err", err) - } + logger.Error("SentryReconnectAndPumpStreamLoop: statusDataFactory error", "stream", streamName, "err", err) time.Sleep(time.Second) continue } diff --git a/p2p/sentry/status_data_provider.go b/p2p/sentry/status_data_provider.go index 91137e5c321..83da0d3bad3 100644 --- a/p2p/sentry/status_data_provider.go +++ b/p2p/sentry/status_data_provider.go @@ -7,6 +7,7 @@ import ( "math/big" "github.com/holiman/uint256" + "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -32,8 +33,11 @@ type StatusDataProvider struct { networkId uint64 genesisHash libcommon.Hash + genesisHead ChainHead heightForks []uint64 timeForks []uint64 + + logger log.Logger } func NewStatusDataProvider( @@ -41,11 +45,14 @@ func NewStatusDataProvider( chainConfig *chain.Config, genesis *types.Block, networkId uint64, + logger log.Logger, ) *StatusDataProvider { s := &StatusDataProvider{ db: db, networkId: networkId, genesisHash: genesis.Hash(), + genesisHead: makeGenesisChainHead(genesis), + logger: logger, } s.heightForks, s.timeForks = forkid.GatherForks(chainConfig, genesis.Time()) @@ -53,6 +60,32 @@ func NewStatusDataProvider( return s } +func uint256FromBigInt(num *big.Int) (*uint256.Int, error) { + if num == nil { + num = new(big.Int) + } + num256 := new(uint256.Int) + overflow := num256.SetFromBig(num) + if overflow { + return nil, fmt.Errorf("uint256FromBigInt: big.Int greater than 2^256-1") + } + return num256, nil +} + +func makeGenesisChainHead(genesis *types.Block) ChainHead { + genesisDifficulty, err := uint256FromBigInt(genesis.Difficulty()) + if err != nil { + panic(fmt.Errorf("makeGenesisChainHead: difficulty conversion error: %w", err)) + } + + return ChainHead{ + HeadHeight: genesis.NumberU64(), + HeadTime: genesis.Time(), + HeadHash: genesis.Hash(), + HeadTd: genesisDifficulty, + } +} + func (s *StatusDataProvider) makeStatusData(head ChainHead) *proto_sentry.StatusData { return &proto_sentry.StatusData{ NetworkId: s.networkId, @@ -71,6 +104,10 @@ func (s *StatusDataProvider) makeStatusData(head ChainHead) *proto_sentry.Status func (s *StatusDataProvider) GetStatusData(ctx context.Context) (*proto_sentry.StatusData, error) { chainHead, err := ReadChainHead(ctx, s.db) if err != nil { + if errors.Is(err, ErrNoHead) { + 
s.logger.Warn("sentry.StatusDataProvider: The canonical chain current header not found in the database. Check the database consistency. Using genesis as a fallback.") + return s.makeStatusData(s.genesisHead), nil + } return nil, err } return s.makeStatusData(chainHead), err @@ -84,23 +121,15 @@ func ReadChainHeadWithTx(tx kv.Tx) (ChainHead, error) { height := header.Number.Uint64() hash := header.Hash() - - var time uint64 - if header != nil { - time = header.Time - } + time := header.Time td, err := rawdb.ReadTd(tx, hash, height) if err != nil { return ChainHead{}, fmt.Errorf("ReadChainHead: ReadTd error at height %d and hash %s: %w", height, hash, err) } - if td == nil { - td = new(big.Int) - } - td256 := new(uint256.Int) - overflow := td256.SetFromBig(td) - if overflow { - return ChainHead{}, fmt.Errorf("ReadChainHead: total difficulty higher than 2^256-1") + td256, err := uint256FromBigInt(td) + if err != nil { + return ChainHead{}, fmt.Errorf("ReadChainHead: total difficulty conversion error: %w", err) } return ChainHead{height, time, hash, td256}, nil diff --git a/p2p/server.go b/p2p/server.go index 1897da93c45..5b2acd1d639 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -675,6 +675,7 @@ func (srv *Server) setupDiscovery(ctx context.Context) error { if err != nil { return err } + srv.discmix.AddSource(srv.DiscV5.RandomNodes()) } return nil } diff --git a/params/bootnodes.go b/params/bootnodes.go index 94d35e08c74..20132b92bbe 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -151,6 +151,8 @@ func KnownDNSNetwork(genesis libcommon.Hash, protocol string) string { net = "goerli" case SepoliaGenesisHash: net = "sepolia" + case HoleskyGenesisHash: + net = "holesky" default: return "" } diff --git a/params/superchain.go b/params/superchain.go index c1695f7c66b..41bd6a32cea 100644 --- a/params/superchain.go +++ b/params/superchain.go @@ -36,7 +36,7 @@ var ( chaosnetRegolithTime = big.NewInt(1692156862) ) -var OPStackSupport = ProtocolVersionV0{Build: [8]byte{}, Major: 6, Minor: 0, Patch: 0, PreRelease: 0}.Encode() +var OPStackSupport = ProtocolVersionV0{Build: [8]byte{}, Major: 7, Minor: 0, Patch: 0, PreRelease: 0}.Encode() // OPStackChainConfigByName loads chain config corresponding to the chain name from superchain registry. // This implementation is based on optimism monorepo(https://github.com/ethereum-optimism/optimism/blob/op-node/v1.4.1/op-node/chaincfg/chains.go#L59) diff --git a/params/version.go b/params/version.go index aed1ed2924d..9c4e0038965 100644 --- a/params/version.go +++ b/params/version.go @@ -34,7 +34,7 @@ var ( const ( VersionMajor = 2 // Major version component of the current release VersionMinor = 60 // Minor version component of the current release - VersionMicro = 0 // Patch version component of the current release + VersionMicro = 1 // Patch version component of the current release VersionModifier = "" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" VersionKeyFinished = "ErigonVersionFinished" diff --git a/polygon/bor/bordb/prune.go b/polygon/bor/bordb/prune.go new file mode 100644 index 00000000000..81c2f264038 --- /dev/null +++ b/polygon/bor/bordb/prune.go @@ -0,0 +1,125 @@ +package bordb + +import ( + "encoding/binary" + "errors" + "math" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/polygon/bor/snaptype" + "github.com/ledgerwatch/erigon/polygon/heimdall" +) + +// PruneBorBlocks - delete [1, to) old blocks after moving it to snapshots. 
+// keeps genesis in db: [1, to) +// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs +// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty +func PruneBorBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int, SpanIdAt func(number uint64) uint64) error { + c, err := tx.Cursor(kv.BorEventNums) + if err != nil { + return err + } + defer c.Close() + var blockNumBytes [8]byte + binary.BigEndian.PutUint64(blockNumBytes[:], blockTo) + k, v, err := c.Seek(blockNumBytes[:]) + if err != nil { + return err + } + var eventIdTo uint64 = math.MaxUint64 + if k != nil { + eventIdTo = binary.BigEndian.Uint64(v) + } + c1, err := tx.RwCursor(kv.BorEvents) + if err != nil { + return err + } + defer c1.Close() + counter := blocksDeleteLimit + for k, _, err = c1.First(); err == nil && k != nil && counter > 0; k, _, err = c1.Next() { + eventId := binary.BigEndian.Uint64(k) + if eventId >= eventIdTo { + break + } + if err = c1.DeleteCurrent(); err != nil { + return err + } + counter-- + } + if err != nil { + return err + } + firstSpanToKeep := SpanIdAt(blockTo) + c2, err := tx.RwCursor(kv.BorSpans) + if err != nil { + return err + } + defer c2.Close() + counter = blocksDeleteLimit + for k, _, err := c2.First(); err == nil && k != nil && counter > 0; k, _, err = c2.Next() { + spanId := binary.BigEndian.Uint64(k) + if spanId >= firstSpanToKeep { + break + } + if err = c2.DeleteCurrent(); err != nil { + return err + } + counter-- + } + + if snaptype.CheckpointsEnabled() { + checkpointCursor, err := tx.RwCursor(kv.BorCheckpoints) + if err != nil { + return err + } + + defer checkpointCursor.Close() + lastCheckpointToRemove, err := heimdall.CheckpointIdAt(tx, blockTo) + + if err != nil { + return err + } + + var checkpointIdBytes [8]byte + binary.BigEndian.PutUint64(checkpointIdBytes[:], uint64(lastCheckpointToRemove)) + for k, _, err := checkpointCursor.Seek(checkpointIdBytes[:]); err == nil && k != nil; k, _, err = checkpointCursor.Prev() { + if err = checkpointCursor.DeleteCurrent(); err != nil { + return err + } + } + } + + if snaptype.MilestonesEnabled() { + milestoneCursor, err := tx.RwCursor(kv.BorMilestones) + + if err != nil { + return err + } + + defer milestoneCursor.Close() + + var lastMilestoneToRemove heimdall.MilestoneId + + for blockCount := 1; err != nil && blockCount < blocksDeleteLimit; blockCount++ { + lastMilestoneToRemove, err = heimdall.MilestoneIdAt(tx, blockTo-uint64(blockCount)) + + if !errors.Is(err, heimdall.ErrMilestoneNotFound) { + return err + } else { + if blockCount == blocksDeleteLimit-1 { + return nil + } + } + } + + var milestoneIdBytes [8]byte + binary.BigEndian.PutUint64(milestoneIdBytes[:], uint64(lastMilestoneToRemove)) + for k, _, err := milestoneCursor.Seek(milestoneIdBytes[:]); err == nil && k != nil; k, _, err = milestoneCursor.Prev() { + if err = milestoneCursor.DeleteCurrent(); err != nil { + return err + } + } + } + + return nil +} diff --git a/polygon/bor/finality/whitelist/milestone.go b/polygon/bor/finality/whitelist/milestone.go index b4777c13cae..a79c712358e 100644 --- a/polygon/bor/finality/whitelist/milestone.go +++ b/polygon/bor/finality/whitelist/milestone.go @@ -154,6 +154,7 @@ func (m *milestone) UnlockSprint(endBlockNum uint64) { } m.Locked = false + m.purgeMilestoneIDsList() err := rawdb.WriteLockField(m.db, m.Locked, m.LockedMilestoneNumber, m.LockedMilestoneHash, m.LockedMilestoneIDs) @@ -213,6 +214,12 @@ func (m *milestone) GetMilestoneIDsList() []string { // This is remove the milestoneIDs stored in the list. 
func (m *milestone) purgeMilestoneIDsList() { + // try is used here as the finality lock is preserved over calls - so the lock state + // is not clearly defined in the local code - this likely needs to be revised + if m.finality.TryLock() { + defer m.finality.Unlock() + } + m.LockedMilestoneIDs = make(map[string]struct{}) } diff --git a/polygon/bor/finality/whitelist/service_test.go b/polygon/bor/finality/whitelist/service_test.go index 62fe3651dea..4e0de45b663 100644 --- a/polygon/bor/finality/whitelist/service_test.go +++ b/polygon/bor/finality/whitelist/service_test.go @@ -548,11 +548,7 @@ func TestPropertyBasedTestingMilestone(t *testing.T) { } fitlerFn := func(i uint64) bool { - if i <= uint64(1000) { - return true - } - - return false + return i <= uint64(1000) } var ( diff --git a/polygon/bor/snaptype/types.go b/polygon/bor/snaptype/types.go index 5a2e32b273d..5931b89290f 100644 --- a/polygon/bor/snaptype/types.go +++ b/polygon/bor/snaptype/types.go @@ -31,7 +31,11 @@ import ( ) func init() { - borTypes := append(coresnaptype.BlockSnapshotTypes, BorSnapshotTypes...) + initTypes() +} + +func initTypes() { + borTypes := append(coresnaptype.BlockSnapshotTypes, BorSnapshotTypes()...) snapcfg.RegisterKnownTypes(networkname.MumbaiChainName, borTypes) snapcfg.RegisterKnownTypes(networkname.AmoyChainName, borTypes) @@ -402,10 +406,43 @@ var ( return buildValueIndex(ctx, sn, salt, d, firstMilestoneId, tmpDir, p, lvl, logger) }), ) - - BorSnapshotTypes = []snaptype.Type{BorEvents, BorSpans, BorCheckpoints, BorMilestones} ) +var recordWaypoints bool + +func RecordWayPoints(value bool) { + recordWaypoints = value + initTypes() +} + +func BorSnapshotTypes() []snaptype.Type { + if recordWaypoints { + return []snaptype.Type{BorEvents, BorSpans, BorCheckpoints, BorMilestones} + } + + return []snaptype.Type{BorEvents, BorSpans} +} + +func CheckpointsEnabled() bool { + for _, snapType := range BorSnapshotTypes() { + if snapType.Enum() == BorCheckpoints.Enum() { + return true + } + } + + return false +} + +func MilestonesEnabled() bool { + for _, snapType := range BorSnapshotTypes() { + if snapType.Enum() == BorMilestones.Enum() { + return true + } + } + + return false +} + func extractValueRange(ctx context.Context, table string, valueFrom, valueTo uint64, db kv.RoDB, collect func([]byte) error, workers int, lvl log.Lvl, logger log.Logger) (uint64, error) { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() diff --git a/polygon/heimdall/checkpoint.go b/polygon/heimdall/checkpoint.go index a1d7a763c54..b26d5697866 100644 --- a/polygon/heimdall/checkpoint.go +++ b/polygon/heimdall/checkpoint.go @@ -62,7 +62,7 @@ func (c *Checkpoint) MarshalJSON() ([]byte, error) { Proposer libcommon.Address `json:"proposer"` StartBlock *big.Int `json:"start_block"` EndBlock *big.Int `json:"end_block"` - RootHash libcommon.Hash `json:"hash"` + RootHash libcommon.Hash `json:"root_hash"` ChainID string `json:"bor_chain_id"` Timestamp uint64 `json:"timestamp"` }{ @@ -79,7 +79,7 @@ func (c *Checkpoint) MarshalJSON() ([]byte, error) { func (c *Checkpoint) UnmarshalJSON(b []byte) error { dto := struct { WaypointFields - RootHash libcommon.Hash `json:"hash"` + RootHash libcommon.Hash `json:"root_hash"` Id CheckpointId `json:"id"` }{} diff --git a/rpc/handler.go b/rpc/handler.go index 5d0e800bdff..2c7a95d7ec0 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -578,9 +578,28 @@ func writeNilIfNotPresent(stream *jsoniter.Stream) { } else { hasNil = false } - if !hasNil { - stream.WriteNil() + if hasNil { + // not 
needed + return + } + + var validJsonEnd bool + if len(b) > 0 { + // assumption is that api call handlers would write valid json in case of errors + // we are not guaranteed that they did write valid json if last elem is "}" or "]" + // since we don't check json nested-ness + // however appending "null" after "}" or "]" does not help much either + lastIdx := len(b) - 1 + validJsonEnd = b[lastIdx] == '}' || b[lastIdx] == ']' } + if validJsonEnd { + // not needed + return + } + + // does not have nil ending + // does not have valid json + stream.WriteNil() } // unsubscribe is the callback function for all *_unsubscribe calls. diff --git a/rpc/handler_test.go b/rpc/handler_test.go index 3b0e4a0441b..c15b67158dc 100644 --- a/rpc/handler_test.go +++ b/rpc/handler_test.go @@ -29,6 +29,10 @@ func TestHandlerDoesNotDoubleWriteNull(t *testing.T) { params: []byte("[3]"), expected: `{"jsonrpc":"2.0","id":1,"result":{}}`, }, + "err_with_valid_json": { + params: []byte("[4]"), + expected: `{"jsonrpc":"2.0","id":1,"result":{"structLogs":[]},"error":{"code":-32000,"message":"id 4"}}`, + }, } for name, testParams := range tests { @@ -50,7 +54,17 @@ func TestHandlerDoesNotDoubleWriteNull(t *testing.T) { if id == 2 { return fmt.Errorf("id 2") } - stream.WriteEmptyObject() + if id == 3 { + stream.WriteEmptyObject() + return nil + } + if id == 4 { + stream.WriteObjectStart() + stream.WriteObjectField("structLogs") + stream.WriteEmptyArray() + stream.WriteObjectEnd() + return fmt.Errorf("id 4") + } return nil } diff --git a/turbo/adapter/ethapi/api.go b/turbo/adapter/ethapi/api.go index b734cf07a3e..72012b3ea76 100644 --- a/turbo/adapter/ethapi/api.go +++ b/turbo/adapter/ethapi/api.go @@ -112,7 +112,7 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type gasFeeCap, gasTipCap = gasPrice, gasPrice } else { // User specified 1559 gas fields (or none), use those - gasFeeCap = baseFee + gasFeeCap = uint256.MustFromBig(baseFee.ToBig()) if args.MaxFeePerGas != nil { overflow := gasFeeCap.SetFromBig(args.MaxFeePerGas.ToInt()) if overflow { @@ -133,7 +133,11 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type } } if args.MaxFeePerBlobGas != nil { - maxFeePerBlobGas.SetFromBig(args.MaxFeePerBlobGas.ToInt()) + blobFee, overflow := uint256.FromBig(args.MaxFeePerBlobGas.ToInt()) + if overflow { + return types.Message{}, fmt.Errorf("args.MaxFeePerBlobGas higher than 2^256-1") + } + maxFeePerBlobGas = blobFee } } diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go index f5e591264bb..c6a875b878a 100644 --- a/turbo/jsonrpc/eth_api.go +++ b/turbo/jsonrpc/eth_api.go @@ -90,7 +90,7 @@ type EthAPI interface { // Sending related (see ./eth_call.go) Call(ctx context.Context, args ethapi2.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi2.StateOverrides) (hexutility.Bytes, error) - EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) + EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs) (hexutil.Uint64, error) SendRawTransaction(ctx context.Context, encodedTx hexutility.Bytes) (common.Hash, error) SendTransaction(_ context.Context, txObject interface{}) (common.Hash, error) Sign(ctx context.Context, _ common.Address, _ hexutility.Bytes) (hexutility.Bytes, error) diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index cd578cf25c1..a160f1c44ed 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -130,7 +130,7 @@ func 
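The `ToMessage` hunk above also removes a subtle aliasing bug: `gasFeeCap = baseFee` copied the pointer, so the later `gasFeeCap.SetFromBig(...)` would have overwritten the caller's `baseFee` in place; copying the value first avoids that. A minimal reproduction of the pitfall using the same `uint256` library:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/holiman/uint256"
)

func main() {
	baseFee := uint256.NewInt(7)

	aliased := baseFee                  // copies the pointer, not the value
	aliased.SetFromBig(big.NewInt(100)) // mutates the shared value
	fmt.Println(baseFee)                // 100 – the caller's baseFee was clobbered

	baseFee = uint256.NewInt(7)
	copied := uint256.MustFromBig(baseFee.ToBig()) // independent value
	copied.SetFromBig(big.NewInt(100))
	fmt.Println(baseFee, copied) // 7 100
}
```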
headerByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.Block } // EstimateGas implements eth_estimateGas. Returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. -func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { +func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs) (hexutil.Uint64, error) { var args ethapi2.CallArgs // if we actually get CallArgs here, we use them if argsOrNil != nil { @@ -154,56 +154,50 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs args.From = new(libcommon.Address) } - bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - if blockNrOrHash != nil { - bNrOrHash = *blockNrOrHash + chainConfig, err := api.chainConfig(ctx, dbtx) + if err != nil { + return 0, fmt.Errorf("read chain config: %v", err) } - // Handle pre-bedrock blocks - blockNum, err := api.blockNumberFromBlockNumberOrHash(dbtx, &bNrOrHash) + latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(latestNumOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return 0, err } - chainConfig, err := api.chainConfig(ctx, dbtx) - if err != nil { - return 0, fmt.Errorf("read chain config: %v", err) - } - if chainConfig.IsOptimismPreBedrock(blockNum) { + + if chainConfig.IsOptimismPreBedrock(latestCanBlockNumber) { if api.historicalRPCService == nil { return 0, rpc.ErrNoHistoricalFallback } var result hexutil.Uint64 - if err := api.relayToHistoricalBackend(ctx, &result, "eth_estimateGas", args, hexutil.EncodeUint64(blockNum)); err != nil { + if err := api.relayToHistoricalBackend(ctx, &result, "eth_estimateGas", args, hexutil.EncodeUint64(latestCanBlockNumber)); err != nil { return 0, fmt.Errorf("historical backend error: %w", err) } return result, nil } - // Determine the highest gas limit can be used during the estimation. - if args.Gas != nil && uint64(*args.Gas) >= params.TxGas { - hi = uint64(*args.Gas) - } else { - // Retrieve the block to act as the gas ceiling - h, err := headerByNumberOrHash(ctx, dbtx, bNrOrHash, api) + // try and get the block from the lru cache first then try DB before failing + block := api.tryBlockFromLru(latestCanHash) + if block == nil { + block, err = api.blockWithSenders(ctx, dbtx, latestCanHash, latestCanBlockNumber) if err != nil { return 0, err } - if h == nil { - // if a block number was supplied and there is no header return 0 - if blockNrOrHash != nil { - return 0, nil - } + } + if block == nil { + return 0, fmt.Errorf("could not find latest block in cache or db") + } - // block number not supplied, so we haven't found a pending block, read the latest block instead - h, err = headerByNumberOrHash(ctx, dbtx, latestNumOrHash, api) - if err != nil { - return 0, err - } - if h == nil { - return 0, nil - } - } - hi = h.GasLimit + stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) + if err != nil { + return 0, err + } + header := block.HeaderNoCopy() + + // Determine the highest gas limit can be used during the estimation. 
+ if args.Gas != nil && uint64(*args.Gas) >= params.TxGas { + hi = uint64(*args.Gas) + } else { + hi = header.GasLimit } var feeCap *big.Int @@ -213,6 +207,8 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs feeCap = args.GasPrice.ToInt() } else if args.MaxFeePerGas != nil { feeCap = args.MaxFeePerGas.ToInt() + } else if header.BaseFee != nil { + feeCap = new(big.Int).Set(header.BaseFee) } else { feeCap = libcommon.Big0 } @@ -259,29 +255,6 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs engine := api.engine() - latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(latestNumOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks - if err != nil { - return 0, err - } - - // try and get the block from the lru cache first then try DB before failing - block := api.tryBlockFromLru(latestCanHash) - if block == nil { - block, err = api.blockWithSenders(ctx, dbtx, latestCanHash, latestCanBlockNumber) - if err != nil { - return 0, err - } - } - if block == nil { - return 0, fmt.Errorf("could not find latest block in cache or db") - } - - stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) - if err != nil { - return 0, err - } - header := block.HeaderNoCopy() - caller, err := transactions.NewReusableCaller(engine, stateReader, nil, header, args, api.GasCap, latestNumOrHash, dbtx, api._blockReader, chainConfig, api.evmCallTimeout) if err != nil { return 0, err diff --git a/turbo/jsonrpc/eth_call_test.go b/turbo/jsonrpc/eth_call_test.go index c537d21762e..7586903b8d5 100644 --- a/turbo/jsonrpc/eth_call_test.go +++ b/turbo/jsonrpc/eth_call_test.go @@ -51,7 +51,7 @@ func TestEstimateGas(t *testing.T) { if _, err := api.EstimateGas(context.Background(), ðapi.CallArgs{ From: &from, To: &to, - }, nil); err != nil { + }); err != nil { t.Errorf("calling EstimateGas: %v", err) } } @@ -103,8 +103,7 @@ func TestEstimateGasHistoricalRPC(t *testing.T) { api.historicalRPCService = historicalRPCService s.UpdatePayload(tt.payload) } - bn := rpc.BlockNumberOrHashWithNumber(0) - val, err := api.EstimateGas(m.Ctx, ðapi2.CallArgs{}, &bn) + val, err := api.EstimateGas(m.Ctx, ðapi2.CallArgs{}) if tt.isError { require.Error(t, err, tt.caseName) require.Equal(t, tt.expected, fmt.Sprintf("%v", err), tt.caseName) diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index 6b00bab024d..11b7b80f97c 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -367,7 +367,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA } var stateReader state.StateReader - if config.TxIndex == nil || isLatest { + if config == nil || config.TxIndex == nil || isLatest { stateReader, err = rpchelper.CreateStateReader(ctx, dbtx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) } else { stateReader, err = rpchelper.CreateHistoryStateReader(dbtx, blockNumber, int(*config.TxIndex), api.historyV3(dbtx), chainConfig.ChainName) diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 03c22dd9eb8..b4d298236e6 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -680,7 +680,7 @@ func (s *RoSnapshots) closeWhatNotInList(l []string) { }) } -func (s *RoSnapshots) removeOverlaps() error { 
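Two behavioural notes on the `EstimateGas` rework above: the estimate is now always performed against the latest canonical block (fetched via the LRU cache or the DB), and when the caller supplies neither `gasPrice` nor `maxFeePerGas`, the fee cap now falls back to that block's base fee instead of zero. A simplified sketch of the fee-cap selection; `estimationFeeCap` is hypothetical and collapses the error paths the real code keeps:

```go
package main

import (
	"fmt"
	"math/big"
)

// estimationFeeCap sketches the priority order used when choosing the fee
// cap for gas estimation after this change.
func estimationFeeCap(gasPrice, maxFeePerGas, headerBaseFee *big.Int) *big.Int {
	switch {
	case gasPrice != nil:
		return gasPrice
	case maxFeePerGas != nil:
		return maxFeePerGas
	case headerBaseFee != nil:
		return new(big.Int).Set(headerBaseFee) // new: latest header's base fee
	default:
		return big.NewInt(0)
	}
}

func main() {
	fmt.Println(estimationFeeCap(nil, nil, big.NewInt(12))) // 12
	fmt.Println(estimationFeeCap(nil, nil, nil))            // 0
}
```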
+func (s *RoSnapshots) removeOverlapsAfterMerge() error { s.lockSegments() defer s.unlockSegments() @@ -1260,8 +1260,6 @@ func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, max return ok, fmt.Errorf("DumpBlocks: %w", err) } - snapshots.removeOverlaps() - if err := snapshots.ReopenFolder(); err != nil { return ok, fmt.Errorf("reopen: %w", err) } @@ -1297,6 +1295,9 @@ func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, max return ok, err } + if err := snapshots.removeOverlapsAfterMerge(); err != nil { + return ok, err + } return ok, nil } diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go index 9eae5a710c3..a6d6a9f6528 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go @@ -240,7 +240,7 @@ func TestRemoveOverlaps(t *testing.T) { require.NoError(err) require.Equal(45, len(list)) - s.removeOverlaps() + s.removeOverlapsAfterMerge() list, err = snaptype.Segments(s.dir) require.NoError(err) diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index c0cf858d15b..6aee315ce9f 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -33,19 +33,13 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, blocksRetired := false + minBlockNum = max(blockReader.FrozenBorBlocks(), minBlockNum) for _, snaptype := range blockReader.BorSnapshots().Types() { - minSnapNum := minBlockNum - - if available := blockReader.BorSnapshots().SegmentsMax(); available < minBlockNum { - minSnapNum = available - } - - if maxBlockNum <= minSnapNum { + if maxBlockNum <= minBlockNum { continue } - blockFrom, blockTo, ok := canRetire(minSnapNum, maxBlockNum+1, snaptype.Enum(), br.chainConfig) - + blockFrom, blockTo, ok := CanRetire(maxBlockNum, minBlockNum, snaptype.Enum(), br.chainConfig) if ok { blocksRetired = true @@ -101,11 +95,22 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, return nil } - err := merger.Merge(ctx, &snapshots.RoSnapshots, borsnaptype.BorSnapshotTypes, rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) - + err := merger.Merge(ctx, &snapshots.RoSnapshots, borsnaptype.BorSnapshotTypes(), rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) if err != nil { return blocksRetired, err } + + { + files, _, err := typedSegments(br.borSnapshots().dir, br.borSnapshots().segmentsMin.Load(), borsnaptype.BorSnapshotTypes(), false) + if err != nil { + return blocksRetired, err + } + + // this is one off code to fix an issue in 2.49.x->2.52.x which missed + // removal of intermediate segments after a merge operation + removeBorOverlapsAfterMerge(br.borSnapshots().dir, files, br.borSnapshots().BlocksAvailable()) + } + return blocksRetired, nil } @@ -127,7 +132,7 @@ type BorRoSnapshots struct { // - gaps are not allowed // - segment have [from:to] semantic func NewBorRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, segmentsMin uint64, logger log.Logger) *BorRoSnapshots { - return &BorRoSnapshots{*newRoSnapshots(cfg, snapDir, borsnaptype.BorSnapshotTypes, segmentsMin, logger)} + return &BorRoSnapshots{*newRoSnapshots(cfg, snapDir, borsnaptype.BorSnapshotTypes(), segmentsMin, logger)} } func (s *BorRoSnapshots) Ranges() []Range { @@ -138,7 +143,7 @@ func (s *BorRoSnapshots) Ranges() []Range { // 
this is one off code to fix an issue in 2.49.x->2.52.x which missed // removal of intermediate segments after a merge operation -func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { +func removeBorOverlapsAfterMerge(dir string, active []snaptype.FileInfo, max uint64) { list, err := snaptype.Segments(dir) if err != nil { @@ -199,15 +204,11 @@ func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { } func (s *BorRoSnapshots) ReopenFolder() error { - files, _, err := typedSegments(s.dir, s.segmentsMin.Load(), borsnaptype.BorSnapshotTypes, false) + files, _, err := typedSegments(s.dir, s.segmentsMin.Load(), borsnaptype.BorSnapshotTypes(), false) if err != nil { return err } - // this is one off code to fix an issue in 2.49.x->2.52.x which missed - // removal of intermediate segments after a merge operation - removeBorOverlaps(s.dir, files, s.BlocksAvailable()) - list := make([]string, 0, len(files)) for _, f := range files { _, fName := filepath.Split(f.Path) diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go index 9172f8faf65..4b0677efa17 100644 --- a/turbo/stages/mock/mock_sentry.go +++ b/turbo/stages/mock/mock_sentry.go @@ -371,6 +371,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK mock.ChainConfig, mock.Genesis, mock.ChainConfig.ChainID.Uint64(), + logger, ) maxBlockBroadcastPeers := func(header *types.Header) uint { return 0 }
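One last small point from the `retireBorBlocks` hunk above: `minBlockNum = max(blockReader.FrozenBorBlocks(), minBlockNum)` uses the generic `min`/`max` builtins introduced in Go 1.21, so no helper function is needed. For reference:

```go
package main

import "fmt"

func main() {
	frozenBorBlocks := uint64(1_000_000)
	minBlockNum := uint64(950_000)

	// Go 1.21+ builtin; works for any ordered type.
	minBlockNum = max(frozenBorBlocks, minBlockNum)
	fmt.Println(minBlockNum) // 1000000
}
```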