diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 60c3951a62..b8658f732b 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -125,25 +125,25 @@ jobs: # ============================== - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: linux path: ./linux - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: macos path: ./macos - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: windows path: ./windows - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: arm64 path: ./arm64 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 304d6f5d0a..b0b57e8d90 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -124,25 +124,25 @@ jobs: # ============================== - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: linux path: ./linux - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: macos path: ./macos - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: windows path: ./windows - name: Download Artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 with: name: arm64 path: ./arm64 diff --git a/.nancy-ignore b/.nancy-ignore index 0b64e763db..6d058384b6 100644 --- a/.nancy-ignore +++ b/.nancy-ignore @@ -1,2 +1,3 @@ CVE-2024-34478 # "CWE-754: Improper Check for Unusual or Exceptional Conditions." This vulnerability is BTC only, BSC does not have the issue. CVE-2024-6104 # "CWE-532: Information Exposure Through Log Files" This is caused by the vulnerabilities go-retryablehttp@v0.7.4, it is only used in cmd devp2p, impact is limited. will upgrade to v0.7.7 later +CVE-2024-8421 # "CWE-400: Uncontrolled Resource Consumption (Resource Exhaustion)" This vulnerability is caused by issues in the golang.org/x/net package. Even the latest version(v0.29.0) has not yet addressed it, but we will continue to monitor updates closely. 
\ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index da3e761d02..513ffbc2ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,43 @@ # Changelog +## v1.4.15 +### BUGFIX +* [\#2680](https://github.com/bnb-chain/bsc/pull/2680) txpool: apply miner's gasceil to txpool +* [\#2688](https://github.com/bnb-chain/bsc/pull/2688) txpool: set default GasCeil from 30M to 0 +* [\#2696](https://github.com/bnb-chain/bsc/pull/2696) miner: limit block size to eth protocol msg size +* [\#2684](https://github.com/bnb-chain/bsc/pull/2684) eth: Add sidecars when available to broadcasted current block + +### FEATURE +* [\#2672](https://github.com/bnb-chain/bsc/pull/2672) faucet: with mainnet balance check, 0.002BNB at least +* [\#2678](https://github.com/bnb-chain/bsc/pull/2678) beaconserver: simulated beacon api server for op-stack +* [\#2687](https://github.com/bnb-chain/bsc/pull/2687) faucet: support customized token +* [\#2698](https://github.com/bnb-chain/bsc/pull/2698) faucet: add example for customized token +* [\#2706](https://github.com/bnb-chain/bsc/pull/2706) faucet: update DIN token faucet support + +### IMPROVEMENT +* [\#2677](https://github.com/bnb-chain/bsc/pull/2677) log: add some p2p log +* [\#2679](https://github.com/bnb-chain/bsc/pull/2679) build(deps): bump actions/download-artifact in /.github/workflows +* [\#2662](https://github.com/bnb-chain/bsc/pull/2662) metrics: add some extra feature flags as node stats +* [\#2675](https://github.com/bnb-chain/bsc/pull/2675) fetcher: Sleep after marking block as done when requeuing +* [\#2695](https://github.com/bnb-chain/bsc/pull/2695) CI: nancy ignore CVE-2024-8421 +* [\#2689](https://github.com/bnb-chain/bsc/pull/2689) consensus/parlia: wait more time when processing huge blocks + +## v1.4.14 + +### BUGFIX +* [\#2643](https://github.com/bnb-chain/bsc/pull/2643) core: fix cache for receipts +* [\#2656](https://github.com/bnb-chain/bsc/pull/2656) ethclient: fix BlobSidecars api +* [\#2657](https://github.com/bnb-chain/bsc/pull/2657) fix: update prunefreezer’s offset when pruneancient and the dataset has pruned block + +### FEATURE +* [\#2661](https://github.com/bnb-chain/bsc/pull/2661) config: setup Mainnet 2 hardfork date: HaberFix & Bohr + +### IMPROVEMENT +* [\#2578](https://github.com/bnb-chain/bsc/pull/2578) core/systemcontracts: use vm.StateDB in UpgradeBuildInSystemContract +* [\#2649](https://github.com/bnb-chain/bsc/pull/2649) internal/debug: remove memsize +* [\#2655](https://github.com/bnb-chain/bsc/pull/2655) internal/ethapi: make GetFinalizedHeader monotonically increasing +* [\#2658](https://github.com/bnb-chain/bsc/pull/2658) core: improve readability of the fork choice logic +* [\#2665](https://github.com/bnb-chain/bsc/pull/2665) faucet: bump and resend faucet transaction if it has been pending for a while + ## v1.4.13 ### BUGFIX diff --git a/Makefile b/Makefile index 30a36697c6..252813ba05 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,11 @@ geth: @echo "Done building." @echo "Run \"$(GOBIN)/geth\" to launch geth." +#? faucet: Build faucet +faucet: + $(GORUN) build/ci.go install ./cmd/faucet + @echo "Done building faucet" + #?
all: Build all packages and executables all: $(GORUN) build/ci.go install diff --git a/README.md b/README.md index 360ba0d7ba..d26fec81c4 100644 --- a/README.md +++ b/README.md @@ -9,16 +9,15 @@ https://pkg.go.dev/badge/github.com/ethereum/go-ethereum )](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) [![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/z2VpC455eU) -But from that baseline of EVM compatible, BNB Smart Chain introduces a system of 21 validators with Proof of Staked Authority (PoSA) consensus that can support short block time and lower fees. The most bonded validator candidates of staking will become validators and produce blocks. The double-sign detection and other slashing logic guarantee security, stability, and chain finality. +But from that baseline of EVM compatible, BNB Smart Chain introduces a system of 21 validators with Proof of Staked Authority (PoSA) consensus that can support short block time and lower fees. The most bonded validator candidates of staking will become validators and produce blocks. The double-sign detection and other slashing logic guarantee security, stability, and chain finality. -Cross-chain transfer and other communication are possible due to native support of interoperability. Relayers and on-chain contracts are developed to support that. BNB Beacon Chain DEX remains a liquid venue of the exchange of assets on both chains. This dual-chain architecture will be ideal for users to take advantage of the fast trading on one side and build their decentralized apps on the other side. **The BNB Smart Chain** will be: +**The BNB Smart Chain** will be: - **A self-sovereign blockchain**: Provides security and safety with elected validators. - **EVM-compatible**: Supports all the existing Ethereum tooling along with faster finality and cheaper transaction fees. -- **Interoperable**: Comes with efficient native dual chain communication; Optimized for scaling high-performance dApps that require fast and smooth user experience. - **Distributed with on-chain governance**: Proof of Staked Authority brings in decentralization and community participants. As the native token, BNB will serve as both the gas of smart contract execution and tokens for staking. -More details in [White Paper](https://www.bnbchain.org/en#smartChain). +More details in [White Paper](https://github.com/bnb-chain/whitepaper/blob/master/WHITEPAPER.md). ## Key features @@ -34,18 +33,8 @@ To combine DPoS and PoA for consensus, BNB Smart Chain implement a novel consens 1. Blocks are produced by a limited set of validators. 2. Validators take turns to produce blocks in a PoA manner, similar to Ethereum's Clique consensus engine. -3. Validator set are elected in and out based on a staking based governance on BNB Beacon Chain. -4. The validator set change is relayed via a cross-chain communication mechanism. -5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func. - - -### Light Client of BNB Beacon Chain - -To achieve the cross-chain communication from BNB Beacon Chain to BNB Smart Chain, need introduce a on-chain light client verification algorithm. -It contains two parts: - -1. [Stateless Precompiled contracts](https://github.com/bnb-chain/bsc/blob/master/core/vm/contracts_lightclient.go) to do tendermint header verification and Merkle Proof verification. -2. 
[Stateful solidity contracts](https://github.com/bnb-chain/bsc-genesis-contract/blob/master/contracts/TendermintLightClient.sol) to store validator set and trusted appHash. +3. Validator set are elected in and out based on a staking based governance on BNB Smart Chain. +4. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func. ## Native Token @@ -53,7 +42,6 @@ BNB will run on BNB Smart Chain in the same way as ETH runs on Ethereum so that BNB will be used to: 1. pay `gas` to deploy or invoke Smart Contract on BSC -2. perform cross-chain operations, such as transfer token assets across BNB Smart Chain and BNB Beacon Chain. ## Building the source @@ -247,9 +235,7 @@ running web servers, so malicious web pages could try to subvert locally availab APIs!** ### Operating a private network -- [BSC-Deploy](https://github.com/bnb-chain/node-deploy/): deploy tool for setting up both BNB Beacon Chain, BNB Smart Chain and the cross chain infrastructure between them. -- [BSC-Docker](https://github.com/bnb-chain/bsc-docker): deploy tool for setting up local BSC cluster in container. - +- [BSC-Deploy](https://github.com/bnb-chain/node-deploy/): deploy tool for setting up BNB Smart Chain. ## Running a bootnode diff --git a/beacon/fakebeacon/api_func.go b/beacon/fakebeacon/api_func.go new file mode 100644 index 0000000000..674bf7fb39 --- /dev/null +++ b/beacon/fakebeacon/api_func.go @@ -0,0 +1,87 @@ +package fakebeacon + +import ( + "context" + "sort" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" +) + +type BlobSidecar struct { + Blob kzg4844.Blob `json:"blob"` + Index int `json:"index"` + KZGCommitment kzg4844.Commitment `json:"kzg_commitment"` + KZGProof kzg4844.Proof `json:"kzg_proof"` +} + +type APIGetBlobSidecarsResponse struct { + Data []*BlobSidecar `json:"data"` +} + +type ReducedGenesisData struct { + GenesisTime string `json:"genesis_time"` +} + +type APIGenesisResponse struct { + Data ReducedGenesisData `json:"data"` +} + +type ReducedConfigData struct { + SecondsPerSlot string `json:"SECONDS_PER_SLOT"` +} + +type IndexedBlobHash struct { + Index int // absolute index in the block, a.k.a. 
position in sidecar blobs array + Hash common.Hash // hash of the blob, used for consistency checks +} + +func configSpec() ReducedConfigData { + return ReducedConfigData{SecondsPerSlot: "1"} +} + +func beaconGenesis() APIGenesisResponse { + return APIGenesisResponse{Data: ReducedGenesisData{GenesisTime: "0"}} +} + +func beaconBlobSidecars(ctx context.Context, backend ethapi.Backend, slot uint64, indices []int) (APIGetBlobSidecarsResponse, error) { + var blockNrOrHash rpc.BlockNumberOrHash + header, err := fetchBlockNumberByTime(ctx, int64(slot), backend) + if err != nil { + log.Error("Error fetching block number", "slot", slot, "indices", indices) + return APIGetBlobSidecarsResponse{}, err + } + sideCars, err := backend.GetBlobSidecars(ctx, header.Hash()) + if err != nil { + log.Error("Error fetching Sidecars", "blockNrOrHash", blockNrOrHash, "err", err) + return APIGetBlobSidecarsResponse{}, err + } + sort.Ints(indices) + fullBlob := len(indices) == 0 + res := APIGetBlobSidecarsResponse{} + idx := 0 + curIdx := 0 + for _, sideCar := range sideCars { + for i := 0; i < len(sideCar.Blobs); i++ { + //hash := kZGToVersionedHash(sideCar.Commitments[i]) + if !fullBlob && curIdx >= len(indices) { + break + } + if fullBlob || idx == indices[curIdx] { + res.Data = append(res.Data, &BlobSidecar{ + Index: idx, + Blob: sideCar.Blobs[i], + KZGCommitment: sideCar.Commitments[i], + KZGProof: sideCar.Proofs[i], + }) + curIdx++ + } + idx++ + } + } + + return res, nil +} diff --git a/beacon/fakebeacon/handlers.go b/beacon/fakebeacon/handlers.go new file mode 100644 index 0000000000..3d3768aa42 --- /dev/null +++ b/beacon/fakebeacon/handlers.go @@ -0,0 +1,88 @@ +package fakebeacon + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/prysmaticlabs/prysm/v5/api/server/structs" + field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams" + "github.com/prysmaticlabs/prysm/v5/network/httputil" +) + +var ( + versionMethod = "/eth/v1/node/version" + specMethod = "/eth/v1/config/spec" + genesisMethod = "/eth/v1/beacon/genesis" + sidecarsMethodPrefix = "/eth/v1/beacon/blob_sidecars/{slot}" +) + +func VersionMethod(w http.ResponseWriter, r *http.Request) { + resp := &structs.GetVersionResponse{ + Data: &structs.Version{ + Version: "", + }, + } + httputil.WriteJson(w, resp) +} + +func SpecMethod(w http.ResponseWriter, r *http.Request) { + httputil.WriteJson(w, &structs.GetSpecResponse{Data: configSpec()}) +} + +func GenesisMethod(w http.ResponseWriter, r *http.Request) { + httputil.WriteJson(w, beaconGenesis()) +} + +func (s *Service) SidecarsMethod(w http.ResponseWriter, r *http.Request) { + indices, err := parseIndices(r.URL) + if err != nil { + httputil.HandleError(w, err.Error(), http.StatusBadRequest) + return + } + segments := strings.Split(r.URL.Path, "/") + slot, err := strconv.ParseUint(segments[len(segments)-1], 10, 64) + if err != nil { + httputil.HandleError(w, "not a valid slot(timestamp)", http.StatusBadRequest) + return + } + + resp, err := beaconBlobSidecars(r.Context(), s.backend, slot, indices) + if err != nil { + httputil.HandleError(w, err.Error(), http.StatusBadRequest) + return + } + httputil.WriteJson(w, resp) +} + +// parseIndices filters out invalid and duplicate blob indices +func parseIndices(url *url.URL) ([]int, error) { + rawIndices := url.Query()["indices"] + indices := make([]int, 0, field_params.MaxBlobsPerBlock) + invalidIndices := make([]string, 0) +loop: + for _, raw := range rawIndices { + ix, err := strconv.Atoi(raw) + if err != nil { + 
invalidIndices = append(invalidIndices, raw) + continue + } + if ix >= field_params.MaxBlobsPerBlock { + invalidIndices = append(invalidIndices, raw) + continue + } + for i := range indices { + if ix == indices[i] { + continue loop + } + } + indices = append(indices, ix) + } + + if len(invalidIndices) > 0 { + return nil, fmt.Errorf("requested blob indices %v are invalid", invalidIndices) + } + return indices, nil +} diff --git a/beacon/fakebeacon/server.go b/beacon/fakebeacon/server.go new file mode 100644 index 0000000000..91f48a2fbd --- /dev/null +++ b/beacon/fakebeacon/server.go @@ -0,0 +1,97 @@ +package fakebeacon + +import ( + "net/http" + "strconv" + + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/gorilla/mux" + "github.com/prysmaticlabs/prysm/v5/api/server" +) + +const ( + DefaultAddr = "localhost" + DefaultPort = 8686 +) + +type Config struct { + Enable bool + Addr string + Port int +} + +func defaultConfig() *Config { + return &Config{ + Enable: false, + Addr: DefaultAddr, + Port: DefaultPort, + } +} + +type Service struct { + cfg *Config + router *mux.Router + backend ethapi.Backend +} + +func NewService(cfg *Config, backend ethapi.Backend) *Service { + cfgs := defaultConfig() + if cfg.Addr != "" { + cfgs.Addr = cfg.Addr + } + if cfg.Port > 0 { + cfgs.Port = cfg.Port + } + + s := &Service{ + cfg: cfgs, + backend: backend, + } + router := s.newRouter() + s.router = router + return s +} + +func (s *Service) Run() { + _ = http.ListenAndServe(s.cfg.Addr+":"+strconv.Itoa(s.cfg.Port), s.router) +} + +func (s *Service) newRouter() *mux.Router { + r := mux.NewRouter() + r.Use(server.NormalizeQueryValuesHandler) + for _, e := range s.endpoints() { + r.HandleFunc(e.path, e.handler).Methods(e.methods...) + } + return r +} + +type endpoint struct { + path string + handler http.HandlerFunc + methods []string +} + +func (s *Service) endpoints() []endpoint { + return []endpoint{ + { + path: versionMethod, + handler: VersionMethod, + methods: []string{http.MethodGet}, + }, + { + path: specMethod, + handler: SpecMethod, + methods: []string{http.MethodGet}, + }, + { + path: genesisMethod, + handler: GenesisMethod, + methods: []string{http.MethodGet}, + }, + { + path: sidecarsMethodPrefix, + handler: s.SidecarsMethod, + methods: []string{http.MethodGet}, + }, + } +} diff --git a/beacon/fakebeacon/server_test.go b/beacon/fakebeacon/server_test.go new file mode 100644 index 0000000000..0b74f565ba --- /dev/null +++ b/beacon/fakebeacon/server_test.go @@ -0,0 +1,90 @@ +package fakebeacon + +import ( + "context" + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +// +//func TestFetchBlockNumberByTime(t *testing.T) { +// blockNum, err := fetchBlockNumberByTime(context.Background(), 1724052941, client) +// assert.Nil(t, err) +// assert.Equal(t, uint64(41493946), blockNum) +// +// blockNum, err = fetchBlockNumberByTime(context.Background(), 1734052941, client) +// assert.Equal(t, err, errors.New("time too large")) +// +// blockNum, err = fetchBlockNumberByTime(context.Background(), 1600153618, client) +// assert.Nil(t, err) +// assert.Equal(t, uint64(493946), blockNum) +//} +// +//func TestBeaconBlobSidecars(t *testing.T) { +// indexBlobHash := []IndexedBlobHash{ +// {Hash: common.HexToHash("0x01231952ecbaede62f8d0398b656072c072db36982c9ef106fbbc39ce14f983c"), Index: 0}, +// {Hash: common.HexToHash("0x012c21a8284d2d707bb5318e874d2e1b97a53d028e96abb702b284a2cbb0f79c"), Index: 1}, +// {Hash: 
common.HexToHash("0x011196c8d02536ede0382aa6e9fdba6c460169c0711b5f97fcd701bd8997aee3"), Index: 2}, +// {Hash: common.HexToHash("0x019c86b46b27401fb978fd175d1eb7dadf4976d6919501b0c5280d13a5bab57b"), Index: 3}, +// {Hash: common.HexToHash("0x01e00db7ee99176b3fd50aab45b4fae953292334bbf013707aac58c455d98596"), Index: 4}, +// {Hash: common.HexToHash("0x0117d23b68123d578a98b3e1aa029661e0abda821a98444c21992eb1e5b7208f"), Index: 5}, +// //{Hash: common.HexToHash("0x01e00db7ee99176b3fd50aab45b4fae953292334bbf013707aac58c455d98596"), Index: 1}, +// } +// +// resp, err := beaconBlobSidecars(context.Background(), 1724055046, []int{0, 1, 2, 3, 4, 5}) // block: 41494647 +// assert.Nil(t, err) +// assert.NotNil(t, resp) +// assert.NotEmpty(t, resp.Data) +// for i, sideCar := range resp.Data { +// assert.Equal(t, indexBlobHash[i].Index, sideCar.Index) +// assert.Equal(t, indexBlobHash[i].Hash, kZGToVersionedHash(sideCar.KZGCommitment)) +// } +// +// apiscs := make([]*BlobSidecar, 0, len(indexBlobHash)) +// // filter and order by hashes +// for _, h := range indexBlobHash { +// for _, apisc := range resp.Data { +// if h.Index == int(apisc.Index) { +// apiscs = append(apiscs, apisc) +// break +// } +// } +// } +// +// assert.Equal(t, len(apiscs), len(resp.Data)) +// assert.Equal(t, len(apiscs), len(indexBlobHash)) +//} + +type TimeToSlotFn func(timestamp uint64) (uint64, error) + +// GetTimeToSlotFn returns a function that converts a timestamp to a slot number. +func GetTimeToSlotFn(ctx context.Context) (TimeToSlotFn, error) { + genesis := beaconGenesis() + config := configSpec() + + genesisTime, _ := strconv.ParseUint(genesis.Data.GenesisTime, 10, 64) + secondsPerSlot, _ := strconv.ParseUint(config.SecondsPerSlot, 10, 64) + if secondsPerSlot == 0 { + return nil, fmt.Errorf("got bad value for seconds per slot: %v", config.SecondsPerSlot) + } + timeToSlotFn := func(timestamp uint64) (uint64, error) { + if timestamp < genesisTime { + return 0, fmt.Errorf("provided timestamp (%v) precedes genesis time (%v)", timestamp, genesisTime) + } + return (timestamp - genesisTime) / secondsPerSlot, nil + } + return timeToSlotFn, nil +} + +func TestAPI(t *testing.T) { + slotFn, err := GetTimeToSlotFn(context.Background()) + assert.Nil(t, err) + + expTx := uint64(123151345) + gotTx, err := slotFn(expTx) + assert.Nil(t, err) + assert.Equal(t, expTx, gotTx) +} diff --git a/beacon/fakebeacon/utils.go b/beacon/fakebeacon/utils.go new file mode 100644 index 0000000000..cc6fe889b9 --- /dev/null +++ b/beacon/fakebeacon/utils.go @@ -0,0 +1,65 @@ +package fakebeacon + +import ( + "context" + "errors" + "fmt" + "math/rand" + "time" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/rpc" +) + +func fetchBlockNumberByTime(ctx context.Context, ts int64, backend ethapi.Backend) (*types.Header, error) { + // calc the block number of the ts. 
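+ // Note: the search below assumes an average block interval of about 3 seconds (hence the /3): it first estimates estimateEndNumber = currentNumber - (currentTime-ts)/3, then re-estimates from each fetched header's timestamp, and finally walks back one block at a time once the header time is within a few seconds of ts, returning the header whose time equals ts exactly, or an error if the walk passes ts without a match.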
+ currentHeader := backend.CurrentHeader() + blockTime := int64(currentHeader.Time) + if ts > blockTime { + return nil, errors.New("time too large") + } + blockNum := currentHeader.Number.Uint64() + estimateEndNumber := int64(blockNum) - (blockTime-ts)/3 + // find the end number + for { + header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(estimateEndNumber)) + if err != nil { + time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond) + continue + } + if header == nil { + estimateEndNumber -= 1 + time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond) + continue + } + headerTime := int64(header.Time) + if headerTime == ts { + return header, nil + } + + // let the estimateEndNumber a little bigger than real value + if headerTime > ts+8 { + estimateEndNumber -= (headerTime - ts) / 3 + } else if headerTime < ts { + estimateEndNumber += (ts-headerTime)/3 + 1 + } else { + // search one by one + for headerTime >= ts { + header, err = backend.HeaderByNumber(ctx, rpc.BlockNumber(estimateEndNumber-1)) + if err != nil { + time.Sleep(time.Duration(rand.Int()%180) * time.Millisecond) + continue + } + headerTime = int64(header.Time) + if headerTime == ts { + return header, nil + } + estimateEndNumber -= 1 + if headerTime < ts { //found the real endNumber + return nil, fmt.Errorf("block not found by time %d", ts) + } + } + } + } +} diff --git a/cmd/faucet/customized/DIN.png b/cmd/faucet/customized/DIN.png new file mode 100644 index 0000000000..be36433308 Binary files /dev/null and b/cmd/faucet/customized/DIN.png differ diff --git a/cmd/faucet/customized/README.md b/cmd/faucet/customized/README.md new file mode 100644 index 0000000000..65a5a9b64c --- /dev/null +++ b/cmd/faucet/customized/README.md @@ -0,0 +1,23 @@ +# 1.Background +This is to support some projects with customized tokens that they want to integrate into the BSC faucet tool. + +## 1.1. How to Integrate Your Token +- Step 1: Fund the faucet address by sending a specific amount of your BEP-20 token to the faucet address (0xaa25aa7a19f9c426e07dee59b12f944f4d9f1dd3) on the BSC testnet. +- Step 2: Update this README.md file and create a Pull Request on [bsc github](https://github.com/bnb-chain/bsc) with relevant information. + +We will review the request, and once it is approved, the faucet tool will start to support the customized token and list it on https://www.bnbchain.org/en/testnet-faucet. 
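To make Step 1 concrete, the following is a minimal, illustrative Go sketch (not part of this patch) that sends a standard BEP-20 `transfer` of the funding amount to the faucet address above using go-ethereum's `ethclient`. The RPC endpoint, token contract address, private key and amount are placeholders to replace with your own values; a plain wallet transfer of the token to the same address achieves the same result.

```go
package main

import (
	"context"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
)

// Minimal ERC-20/BEP-20 ABI fragment covering only transfer(address,uint256).
const erc20TransferABI = `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"","type":"bool"}],"type":"function"}]`

func main() {
	faucet := common.HexToAddress("0xaa25aa7a19f9c426e07dee59b12f944f4d9f1dd3") // faucet address from this README
	token := common.HexToAddress("0x...")                                        // placeholder: your BEP-20 contract
	key, err := crypto.HexToECDSA("...")                                         // placeholder: hex private key of the funding account
	if err != nil {
		log.Fatal(err)
	}
	client, err := ethclient.Dial("https://...") // placeholder: a BSC testnet RPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// ABI-encode transfer(faucet, amount); 10 tokens assuming 18 decimals, adjust as needed.
	parsed, _ := abi.JSON(strings.NewReader(erc20TransferABI))
	amount, _ := new(big.Int).SetString("10000000000000000000", 10)
	input, _ := parsed.Pack("transfer", faucet, amount)

	from := crypto.PubkeyToAddress(key.PublicKey)
	nonce, _ := client.PendingNonceAt(ctx, from)
	gasPrice, _ := client.SuggestGasPrice(ctx)

	// Build, sign (chain ID 97 = BSC testnet) and broadcast the funding transaction.
	tx := types.NewTransaction(nonce, token, big.NewInt(0), 100000, gasPrice, input)
	signed, err := types.SignTx(tx, types.NewEIP155Signer(big.NewInt(97)), key)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.SendTransaction(ctx, signed); err != nil {
		log.Fatal(err)
	}
	log.Println("funding tx sent:", signed.Hash().Hex())
}
```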
+ +# 2.Token List +## 2.1.DemoToken +- symbol: DEMO +- amount: 10000000000000000000 +- icon: ./demotoken.png +- addr: https://testnet.bscscan.com/address/0xe15c158d768c306dae87b96430a94f884333e55d +- fundTx: [0xa499dc9aaf918aff0507538a8aa80a88d0af6ca15054e6acc57b69c651945280](https://testnet.bscscan.com/tx/0x2a3f334b6ca756b64331bdec9e6cf3207ac50a4839fda6379e909de4d9a194ca) +- +## 2.2.DIN token +- symbol: DIN +- amount: 10000000000000000000 +- icon: ./DIN.png +- addr: https://testnet.bscscan.com/address/0xb8b40FcC5B4519Dba0E07Ac8821884CE90BdE677 +- fundTx: [0x17fc4c1db133830c7c146a0d41ca1df31cb446989ec11b382d58bb6176d6fde3](https://testnet.bscscan.com/tx/0x17fc4c1db133830c7c146a0d41ca1df31cb446989ec11b382d58bb6176d6fde3) diff --git a/cmd/faucet/customized/demotoken.png b/cmd/faucet/customized/demotoken.png new file mode 100644 index 0000000000..1eff3f32d8 Binary files /dev/null and b/cmd/faucet/customized/demotoken.png differ diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 25a0cc084a..918b60a586 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -53,9 +53,10 @@ import ( ) var ( - genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with") - apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection") - wsEndpoint = flag.String("ws", "http://127.0.0.1:7777/", "Url to ws endpoint") + genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with") + apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection") + wsEndpoint = flag.String("ws", "http://127.0.0.1:7777/", "Url to ws endpoint") + wsEndpointMainnet = flag.String("ws.mainnet", "", "Url to ws endpoint of BSC mainnet") netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet") payoutFlag = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request") @@ -77,6 +78,12 @@ var ( fixGasPrice = flag.Int64("faucet.fixedprice", 0, "Will use fixed gas price if specified") twitterTokenFlag = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API") twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API") + + resendInterval = 15 * time.Second + resendBatchSize = 3 + resendMaxGasPrice = big.NewInt(50 * params.GWei) + wsReadTimeout = 5 * time.Minute + minMainnetBalance = big.NewInt(2 * 1e6 * params.GWei) // 0.002 bnb ) var ( @@ -87,11 +94,17 @@ var ( //go:embed faucet.html var websiteTmpl string +func weiToEtherStringFx(wei *big.Int, prec int) string { + etherValue := new(big.Float).Quo(new(big.Float).SetInt(wei), big.NewFloat(params.Ether)) + // Format the big.Float directly to a string with the specified precision + return etherValue.Text('f', prec) +} + func main() { // Parse the flags and set up the logger to print everything requested flag.Parse() - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.FromLegacyLevel(*logFlag), true))) - + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.FromLegacyLevel(*logFlag), false))) + log.Info("faucet started") // Construct the payout tiers amounts := make([]string, *tiersFlag) for i := 0; i < *tiersFlag; i++ { @@ -170,7 +183,7 @@ func main() { log.Crit("Failed to unlock faucet signer account", "err", err) } // Assemble and start the faucet light service - faucet, err := newFaucet(genesis, *wsEndpoint, ks, website.Bytes(), bep2eInfos) + faucet, err := newFaucet(genesis, *wsEndpoint, *wsEndpointMainnet, ks, 
website.Bytes(), bep2eInfos) if err != nil { log.Crit("Failed to start faucet", "err", err) } @@ -197,9 +210,10 @@ type bep2eInfo struct { // faucet represents a crypto faucet backed by an Ethereum light client. type faucet struct { - config *params.ChainConfig // Chain configurations for signing - client *ethclient.Client // Client connection to the Ethereum chain - index []byte // Index page to serve up on the web + config *params.ChainConfig // Chain configurations for signing + client *ethclient.Client // Client connection to the Ethereum chain + clientMainnet *ethclient.Client // Client connection to BSC mainnet for balance check + index []byte // Index page to serve up on the web keystore *keystore.KeyStore // Keystore containing the single signer account accounts.Account // Account funding user faucet requests @@ -228,7 +242,7 @@ type wsConn struct { wlock sync.Mutex } -func newFaucet(genesis *core.Genesis, url string, ks *keystore.KeyStore, index []byte, bep2eInfos map[string]bep2eInfo) (*faucet, error) { +func newFaucet(genesis *core.Genesis, url string, mainnetUrl string, ks *keystore.KeyStore, index []byte, bep2eInfos map[string]bep2eInfo) (*faucet, error) { bep2eAbi, err := abi.JSON(strings.NewReader(bep2eAbiJson)) if err != nil { return nil, err @@ -237,6 +251,11 @@ func newFaucet(genesis *core.Genesis, url string, ks *keystore.KeyStore, index [ if err != nil { return nil, err } + clientMainnet, err := ethclient.Dial(mainnetUrl) + if err != nil { + // skip mainnet balance check if it there is no available mainnet endpoint + log.Warn("dail mainnet endpoint failed", "mainnetUrl", mainnetUrl, "err", err) + } // Allow 1 request per minute with burst of 5, and cache up to 1000 IPs limiter, err := NewIPRateLimiter(rate.Limit(1.0), 5, 1000) @@ -245,16 +264,17 @@ func newFaucet(genesis *core.Genesis, url string, ks *keystore.KeyStore, index [ } return &faucet{ - config: genesis.Config, - client: client, - index: index, - keystore: ks, - account: ks.Accounts()[0], - timeouts: make(map[string]time.Time), - update: make(chan struct{}, 1), - bep2eInfos: bep2eInfos, - bep2eAbi: bep2eAbi, - limiter: limiter, + config: genesis.Config, + client: client, + clientMainnet: clientMainnet, + index: index, + keystore: ks, + account: ks.Accounts()[0], + timeouts: make(map[string]time.Time), + update: make(chan struct{}, 1), + bep2eInfos: bep2eInfos, + bep2eAbi: bep2eAbi, + limiter: limiter, }, nil } @@ -378,7 +398,11 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { Captcha string `json:"captcha"` Symbol string `json:"symbol"` } + // not sure if it helps or not, but set a read deadline could help prevent resource leakage + // if user did not give response for too long, then the routine will be stuck. 
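+ // Note: wsReadTimeout is 5 minutes (declared with the other faucet tunables above); the deadline bounds the next ReadJSON call, which returns an error once it expires, letting this goroutine return instead of blocking forever.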
+ conn.SetReadDeadline(time.Now().Add(wsReadTimeout)) if err = conn.ReadJSON(&msg); err != nil { + log.Debug("read json message failed", "err", err, "ip", ip) return } if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { @@ -396,9 +420,9 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } continue } - log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier) + log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier, "ip", ip) - // If captcha verifications are enabled, make sure we're not dealing with a robot + // check #1: captcha verifications to exclude robot if *captchaToken != "" { form := url.Values{} form.Add("secret", *captchaSecret) @@ -475,88 +499,108 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } continue } - log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address) - // Ensure the user didn't request funds too recently + // check #2: check IP and ID(address) to ensure the user didn't request funds too frequently f.lock.Lock() - var ( - fund bool - timeout time.Time - ) if ipTimeout := f.timeouts[ips[len(ips)-2]]; time.Now().Before(ipTimeout) { + f.lock.Unlock() if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(ipTimeout)))); err != nil { // nolint: gosimple log.Warn("Failed to send funding error to client", "err", err) + return } + log.Info("too frequent funding(ip)", "TimeLeft", common.PrettyDuration(time.Until(ipTimeout)), "ip", ips[len(ips)-2], "ipsStr", ipsStr) + continue + } + if idTimeout := f.timeouts[id]; time.Now().Before(idTimeout) { f.lock.Unlock() + // Send an error if too frequent funding, otherwise a success + if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(idTimeout)))); err != nil { // nolint: gosimple + log.Warn("Failed to send funding error to client", "err", err) + return + } + log.Info("too frequent funding(id)", "TimeLeft", common.PrettyDuration(time.Until(idTimeout)), "id", id) continue } - - if timeout = f.timeouts[id]; time.Now().After(timeout) { - var tx *types.Transaction - if msg.Symbol == "BNB" { - // User wasn't funded recently, create the funding transaction - amount := new(big.Int).Div(new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether), big.NewInt(10)) - amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil)) - amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil)) - - tx = types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil) + // check #3: minimum mainnet balance check, internal error will bypass the check to avoid blocking the faucet service + if f.clientMainnet != nil { + mainnetAddr := address + balanceMainnet, err := f.clientMainnet.BalanceAt(context.Background(), mainnetAddr, nil) + if err != nil { + log.Warn("check balance failed, call BalanceAt", "err", err) + } else if balanceMainnet == nil { + log.Warn("check balance failed, balanceMainnet is nil") } else { - tokenInfo, ok := f.bep2eInfos[msg.Symbol] - if !ok { - f.lock.Unlock() - log.Warn("Failed to find symbol", "symbol", msg.Symbol) - continue - } - input, err := f.bep2eAbi.Pack("transfer", address, &tokenInfo.Amount) - if err != nil { + if balanceMainnet.Cmp(minMainnetBalance) < 0 { f.lock.Unlock() - log.Warn("Failed to pack transfer transaction", 
"err", err) + log.Warn("insufficient BNB on BSC mainnet", "address", mainnetAddr, + "balanceMainnet", balanceMainnet, "minMainnetBalance", minMainnetBalance) + // Send an error if failed to meet the minimum balance requirement + if err = sendError(wsconn, fmt.Errorf("insufficient BNB on BSC mainnet (require >=%sBNB)", + weiToEtherStringFx(minMainnetBalance, 3))); err != nil { + log.Warn("Failed to send mainnet minimum balance error to client", "err", err) + return + } continue } - tx = types.NewTransaction(f.nonce+uint64(len(f.reqs)), tokenInfo.Contract, nil, 420000, f.price, input) } - signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID) - if err != nil { + } + log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address, "ip", ip) + + // now, it is ok to send tBNB or other tokens + var tx *types.Transaction + if msg.Symbol == "BNB" { + // User wasn't funded recently, create the funding transaction + amount := new(big.Int).Div(new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether), big.NewInt(10)) + amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil)) + amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil)) + + tx = types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil) + } else { + tokenInfo, ok := f.bep2eInfos[msg.Symbol] + if !ok { f.lock.Unlock() - if err = sendError(wsconn, err); err != nil { - log.Warn("Failed to send transaction creation error to client", "err", err) - return - } + log.Warn("Failed to find symbol", "symbol", msg.Symbol) continue } - // Submit the transaction and mark as funded if successful - if err := f.client.SendTransaction(context.Background(), signed); err != nil { + input, err := f.bep2eAbi.Pack("transfer", address, &tokenInfo.Amount) + if err != nil { f.lock.Unlock() - if err = sendError(wsconn, err); err != nil { - log.Warn("Failed to send transaction transmission error to client", "err", err) - return - } + log.Warn("Failed to pack transfer transaction", "err", err) continue } - f.reqs = append(f.reqs, &request{ - Avatar: avatar, - Account: address, - Time: time.Now(), - Tx: signed, - }) - timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute - grace := timeout / 288 // 24h timeout => 5m grace - - f.timeouts[id] = time.Now().Add(timeout - grace) - f.timeouts[ips[len(ips)-2]] = time.Now().Add(timeout - grace) - fund = true + tx = types.NewTransaction(f.nonce+uint64(len(f.reqs)), tokenInfo.Contract, nil, 420000, f.price, input) } - f.lock.Unlock() - - // Send an error if too frequent funding, otherwise a success - if !fund { - if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple - log.Warn("Failed to send funding error to client", "err", err) + signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID) + if err != nil { + f.lock.Unlock() + if err = sendError(wsconn, err); err != nil { + log.Warn("Failed to send transaction creation error to client", "err", err) return } continue } + // Submit the transaction and mark as funded if successful + if err := f.client.SendTransaction(context.Background(), signed); err != nil { + f.lock.Unlock() + if err = sendError(wsconn, err); err != nil { + log.Warn("Failed to send transaction transmission error to client", "err", err) + return + } + continue + } + f.reqs = append(f.reqs, &request{ + Avatar: 
avatar, + Account: address, + Time: time.Now(), + Tx: signed, + }) + timeoutInt64 := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute + grace := timeoutInt64 / 288 // 24h timeout => 5m grace + + f.timeouts[id] = time.Now().Add(timeoutInt64 - grace) + f.timeouts[ips[len(ips)-2]] = time.Now().Add(timeoutInt64 - grace) + f.lock.Unlock() if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil { log.Warn("Failed to send funding success to client", "err", err) return @@ -605,9 +649,52 @@ func (f *faucet) refresh(head *types.Header) error { f.lock.Lock() f.head, f.balance = head, balance f.price, f.nonce = price, nonce - if len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() > f.nonce { + if len(f.reqs) == 0 { + log.Debug("refresh len(f.reqs) == 0", "f.nonce", f.nonce) + f.lock.Unlock() + return nil + } + if f.reqs[0].Tx.Nonce() == f.nonce { + // if the next Tx failed to be included for a certain time(resendInterval), try to + // resend it with higher gasPrice, as it could be discarded in the network. + // Also resend extra following txs, as they could be discarded as well. + if time.Now().After(f.reqs[0].Time.Add(resendInterval)) { + for i, req := range f.reqs { + if i >= resendBatchSize { + break + } + prePrice := req.Tx.GasPrice() + // bump gas price 20% to replace the previous tx + newPrice := new(big.Int).Add(prePrice, new(big.Int).Div(prePrice, big.NewInt(5))) + if newPrice.Cmp(resendMaxGasPrice) >= 0 { + log.Info("resendMaxGasPrice reached", "newPrice", newPrice, "resendMaxGasPrice", resendMaxGasPrice, "nonce", req.Tx.Nonce()) + break + } + newTx := types.NewTransaction(req.Tx.Nonce(), *req.Tx.To(), req.Tx.Value(), req.Tx.Gas(), newPrice, req.Tx.Data()) + newSigned, err := f.keystore.SignTx(f.account, newTx, f.config.ChainID) + if err != nil { + log.Error("resend sign tx failed", "err", err) + } + log.Info("reqs[0] Tx has been stuck for a while, trigger resend", + "resendInterval", resendInterval, "resendTxSize", resendBatchSize, + "preHash", req.Tx.Hash().Hex(), "newHash", newSigned.Hash().Hex(), + "newPrice", newPrice, "nonce", req.Tx.Nonce(), "req.Tx.Gas()", req.Tx.Gas()) + if err := f.client.SendTransaction(context.Background(), newSigned); err != nil { + log.Warn("resend tx failed", "err", err) + continue + } + req.Tx = newSigned + } + } + } + // it is abnormal that reqs[0] has larger nonce than next expected nonce. + // could be caused by reorg? reset it + if f.reqs[0].Tx.Nonce() > f.nonce { + log.Warn("reset due to nonce gap", "f.nonce", f.nonce, "f.reqs[0].Tx.Nonce()", f.reqs[0].Tx.Nonce()) f.reqs = f.reqs[:0] } + // remove the reqs if they have smaller nonce, which means it is no longer valid, + // either has been accepted or replaced. 
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce { f.reqs = f.reqs[1:] } diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 5c829a2f76..10d2224a14 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/scwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet" + "github.com/ethereum/go-ethereum/beacon/fakebeacon" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -92,10 +93,11 @@ type ethstatsConfig struct { } type gethConfig struct { - Eth ethconfig.Config - Node node.Config - Ethstats ethstatsConfig - Metrics metrics.Config + Eth ethconfig.Config + Node node.Config + Ethstats ethstatsConfig + Metrics metrics.Config + FakeBeacon fakebeacon.Config } func loadConfig(file string, cfg *gethConfig) error { @@ -242,11 +244,22 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) } + if ctx.IsSet(utils.FakeBeaconAddrFlag.Name) { + cfg.FakeBeacon.Addr = ctx.String(utils.FakeBeaconAddrFlag.Name) + } + if ctx.IsSet(utils.FakeBeaconPortFlag.Name) { + cfg.FakeBeacon.Port = ctx.Int(utils.FakeBeaconPortFlag.Name) + } + if cfg.FakeBeacon.Enable || ctx.IsSet(utils.FakeBeaconEnabledFlag.Name) { + go fakebeacon.NewService(&cfg.FakeBeacon, backend).Run() + } + git, _ := version.VCS() utils.SetupMetrics(ctx, utils.EnableBuildInfo(git.Commit, git.Date), utils.EnableMinerInfo(ctx, &cfg.Eth.Miner), utils.EnableNodeInfo(&cfg.Eth.TxPool, stack.Server().NodeInfo()), + utils.EnableNodeTrack(ctx, &cfg.Eth, stack), ) return stack, backend } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index a851c8d373..e2c277b64f 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -232,6 +232,12 @@ var ( utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBOrganizationFlag, } + + fakeBeaconFlags = []cli.Flag{ + utils.FakeBeaconEnabledFlag, + utils.FakeBeaconAddrFlag, + utils.FakeBeaconPortFlag, + } ) var app = flags.NewApp("the go-ethereum command line interface") @@ -286,6 +292,7 @@ func init() { consoleFlags, debug.Flags, metricsFlags, + fakeBeaconFlags, ) flags.AutoEnvVars(app.Flags, "GETH") @@ -371,8 +378,6 @@ func geth(ctx *cli.Context) error { // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the // miner. 
func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isConsole bool) { - debug.Memsize.Add("node", stack) - // Start up the node itself utils.StartNode(ctx, stack, isConsole) @@ -445,22 +450,23 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon } // Start auxiliary services if enabled + ethBackend, ok := backend.(*eth.EthAPIBackend) + gasCeil := ethBackend.Miner().GasCeil() + if gasCeil > params.SystemTxsGas { + ethBackend.TxPool().SetMaxGas(gasCeil - params.SystemTxsGas) + } if ctx.Bool(utils.MiningEnabledFlag.Name) { // Mining only makes sense if a full Ethereum node is running if ctx.String(utils.SyncModeFlag.Name) == "light" { utils.Fatalf("Light clients do not support mining") } - ethBackend, ok := backend.(*eth.EthAPIBackend) + if !ok { utils.Fatalf("Ethereum service not running") } // Set the gas price to the limits from the CLI and start mining gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) ethBackend.TxPool().SetGasTip(gasprice) - gasCeil := ethBackend.Miner().GasCeil() - if gasCeil > params.SystemTxsGas { - ethBackend.TxPool().SetMaxGas(gasCeil - params.SystemTxsGas) - } if err := ethBackend.StartMining(); err != nil { utils.Fatalf("Failed to start mining: %v", err) } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 049908857f..f26ce3881e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -35,8 +35,11 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/internal/version" + "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/beacon/fakebeacon" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/fdlimit" "github.com/ethereum/go-ethereum/core" @@ -1146,6 +1149,25 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server. 
Value: params.DefaultExtraReserveForBlobRequests, Category: flags.MiscCategory, } + + // Fake beacon + FakeBeaconEnabledFlag = &cli.BoolFlag{ + Name: "fake-beacon", + Usage: "Enable the HTTP-RPC server of fake-beacon", + Category: flags.APICategory, + } + FakeBeaconAddrFlag = &cli.StringFlag{ + Name: "fake-beacon.addr", + Usage: "HTTP-RPC server listening addr of fake-beacon", + Value: fakebeacon.DefaultAddr, + Category: flags.APICategory, + } + FakeBeaconPortFlag = &cli.IntFlag{ + Name: "fake-beacon.port", + Usage: "HTTP-RPC server listening port of fake-beacon", + Value: fakebeacon.DefaultPort, + Category: flags.APICategory, + } ) var ( @@ -2297,6 +2319,67 @@ func EnableNodeInfo(poolConfig *legacypool.Config, nodeInfo *p2p.NodeInfo) Setup } } +func EnableNodeTrack(ctx *cli.Context, cfg *ethconfig.Config, stack *node.Node) SetupMetricsOption { + nodeInfo := stack.Server().NodeInfo() + return func() { + // register node info into metrics + metrics.NewRegisteredLabel("node-stats", nil).Mark(map[string]interface{}{ + "NodeType": parseNodeType(), + "ENR": nodeInfo.ENR, + "Mining": ctx.Bool(MiningEnabledFlag.Name), + "Etherbase": parseEtherbase(cfg), + "MiningFeatures": parseMiningFeatures(ctx, cfg), + "DBFeatures": parseDBFeatures(cfg, stack), + }) + } +} + +func parseEtherbase(cfg *ethconfig.Config) string { + if cfg.Miner.Etherbase == (common.Address{}) { + return "" + } + return cfg.Miner.Etherbase.String() +} + +func parseNodeType() string { + git, _ := version.VCS() + version := []string{params.VersionWithMeta} + if len(git.Commit) >= 7 { + version = append(version, git.Commit[:7]) + } + if git.Date != "" { + version = append(version, git.Date) + } + arch := []string{runtime.GOOS, runtime.GOARCH} + infos := []string{"BSC", strings.Join(version, "-"), strings.Join(arch, "-"), runtime.Version()} + return strings.Join(infos, "/") +} + +func parseDBFeatures(cfg *ethconfig.Config, stack *node.Node) string { + var features []string + if cfg.StateScheme == rawdb.PathScheme { + features = append(features, "PBSS") + } + if stack.CheckIfMultiDataBase() { + features = append(features, "MultiDB") + } + return strings.Join(features, "|") +} + +func parseMiningFeatures(ctx *cli.Context, cfg *ethconfig.Config) string { + if !ctx.Bool(MiningEnabledFlag.Name) { + return "" + } + var features []string + if cfg.Miner.Mev.Enabled { + features = append(features, "MEV") + } + if cfg.Miner.VoteEnable { + features = append(features, "FFVoting") + } + return strings.Join(features, "|") +} + func SetupMetrics(ctx *cli.Context, options ...SetupMetricsOption) { if metrics.Enabled { log.Info("Enabling metrics collection") diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 4a79ec344b..b52469f025 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -68,7 +68,6 @@ const ( wiggleTime = uint64(1) // second, Random delay (per signer) to allow concurrent signers initialBackOffTime = uint64(1) // second - processBackOffTime = uint64(1) // second systemRewardPercent = 4 // it means 1/2^4 = 1/16 percentage of gas fee incoming will be distributed to system @@ -1550,6 +1549,23 @@ func (p *Parlia) Delay(chain consensus.ChainReader, header *types.Header, leftOv return &delay } +// AssembleSignature assemble the signature for block header +func (p *Parlia) AssembleSignature(block *types.Block) (*types.Block, error) { + header := block.Header() + // Don't hold the val fields for the entire sealing procedure + p.lock.RLock() + val, signFn := p.val, p.signFn + p.lock.RUnlock() + sig, err 
:= signFn(accounts.Account{Address: val}, accounts.MimetypeParlia, ParliaRLP(header, p.chainConfig.ChainID)) + if err != nil { + log.Error("Sign for the block header failed when sealing", "err", err) + return nil, err + } + copy(header.Extra[len(header.Extra)-extraSeal:], sig) + block = block.WithSeal(header) + return block, nil +} + // Seal implements consensus.Engine, attempting to create a sealed block using // the local signing credentials. func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { @@ -1616,12 +1632,15 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res copy(header.Extra[len(header.Extra)-extraSeal:], sig) if p.shouldWaitForCurrentBlockProcess(chain, header, snap) { - log.Info("Waiting for received in turn block to process") + highestVerifiedHeader := chain.GetHighestVerifiedHeader() + // including time for writing and committing blocks + waitProcessEstimate := math.Ceil(float64(highestVerifiedHeader.GasUsed) / float64(100_000_000)) + log.Info("Waiting for received in turn block to process", "waitProcessEstimate(Seconds)", waitProcessEstimate) select { case <-stop: log.Info("Received block process finished, abort block seal") return - case <-time.After(time.Duration(processBackOffTime) * time.Second): + case <-time.After(time.Duration(waitProcessEstimate) * time.Second): if chain.CurrentHeader().Number.Uint64() >= header.Number.Uint64() { log.Info("Process backoff time exhausted, and current header has updated to abort this seal") return @@ -2105,6 +2124,10 @@ func (p *Parlia) backOffTime(snap *Snapshot, header *types.Header, val common.Ad backOffSteps[i], backOffSteps[j] = backOffSteps[j], backOffSteps[i] }) + for i := uint64(0); i < uint64(n); i++ { + log.Debug("backOffTime", "Number", header.Number, "val", validators[i], "delay", delay+backOffSteps[i]*wiggleTime) + } + delay += backOffSteps[idx] * wiggleTime return delay } diff --git a/core/block_validator.go b/core/block_validator.go index b82965a99d..d15e2cd786 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -66,31 +66,6 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin return validator } -// ValidateListsInBody validates that UncleHash, WithdrawalsHash, and WithdrawalsHash correspond to the lists in the block body, respectively. -func ValidateListsInBody(block *types.Block) error { - header := block.Header() - if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { - return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash) - } - if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { - return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) - } - // Withdrawals are present after the Shanghai fork. - if header.WithdrawalsHash != nil { - // Withdrawals list must be present in body after Shanghai. 
- if block.Withdrawals() == nil { - return errors.New("missing withdrawals in block body") - } - if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash { - return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash) - } - } else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars - // Withdrawals are not allowed prior to shanghai fork - return errors.New("withdrawals present in block body") - } - return nil -} - // ValidateBody validates the given block's uncles and verifies the block // header's transaction and uncle roots. The headers are assumed to be already // validated at this point. @@ -108,12 +83,31 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if err := v.engine.VerifyUncles(v.bc, block); err != nil { return err } + if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { + return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash) + } validateFuns := []func() error{ func() error { - return ValidateListsInBody(block) + if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { + return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) + } + return nil }, func() error { + // Withdrawals are present after the Shanghai fork. + if header.WithdrawalsHash != nil { + // Withdrawals list must be present in body after Shanghai. + if block.Withdrawals() == nil { + return errors.New("missing withdrawals in block body") + } + if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash { + return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash) + } + } else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars + // Withdrawals are not allowed prior to shanghai fork + return errors.New("withdrawals present in block body") + } // Blob transactions may be present after the Cancun fork. var blobs int for i, tx := range block.Transactions() { diff --git a/core/blockchain.go b/core/blockchain.go index 23d42f2b7f..b2a56d74bf 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -100,6 +100,8 @@ var ( blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil) blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil) + blockRecvTimeDiffGauge = metrics.NewRegisteredGauge("chain/block/recvtimediff", nil) + errStateRootVerificationFailed = errors.New("state root verification failed") errInsertionInterrupted = errors.New("insertion is interrupted") errChainStopped = errors.New("blockchain is stopped") @@ -1803,7 +1805,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
} bc.hc.tdCache.Add(block.Hash(), externTd) bc.blockCache.Add(block.Hash(), block) - bc.receiptsCache.Add(block.Hash(), receipts) + bc.cacheReceipts(block.Hash(), receipts, block) if bc.chainConfig.IsCancun(block.Number(), block.Time()) { bc.sidecarsCache.Add(block.Hash(), block.Sidecars()) } @@ -2055,6 +2057,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) return 0, nil } + if len(chain) > 0 { + blockRecvTimeDiffGauge.Update(time.Now().Unix() - int64(chain[0].Time())) + } // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) signer := types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()) go SenderCacher.RecoverFromBlocks(signer, chain) @@ -2320,8 +2325,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) return it.index, err } - bc.cacheReceipts(block.Hash(), receipts, block) - // Update the metrics touched during block commit accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them diff --git a/core/forkchoice.go b/core/forkchoice.go index bea7652fa2..4e931ccf50 100644 --- a/core/forkchoice.go +++ b/core/forkchoice.go @@ -121,12 +121,19 @@ func (f *ForkChoice) ReorgNeeded(current *types.Header, extern *types.Header) (b if f.preserve != nil { currentPreserve, externPreserve = f.preserve(current), f.preserve(extern) } - doubleSign := (extern.Coinbase == current.Coinbase) - reorg = !currentPreserve && (externPreserve || - extern.Time < current.Time || - extern.Time == current.Time && - ((doubleSign && extern.Hash().Cmp(current.Hash()) < 0) || - (!doubleSign && f.rand.Float64() < 0.5))) + choiceRules := func() bool { + if extern.Time == current.Time { + doubleSign := (extern.Coinbase == current.Coinbase) + if doubleSign { + return extern.Hash().Cmp(current.Hash()) < 0 + } else { + return f.rand.Float64() < 0.5 + } + } else { + return extern.Time < current.Time + } + } + reorg = !currentPreserve && (externPreserve || choiceRules()) } return reorg, nil } diff --git a/core/rawdb/prunedfreezer.go b/core/rawdb/prunedfreezer.go index b9c18ac2aa..c772ca7630 100644 --- a/core/rawdb/prunedfreezer.go +++ b/core/rawdb/prunedfreezer.go @@ -68,6 +68,7 @@ func newPrunedFreezer(datadir string, db ethdb.KeyValueStore, offset uint64) (*p // repair init frozen , compatible disk-ancientdb and pruner-block-tool. func (f *prunedfreezer) repair(datadir string) error { + offset := atomic.LoadUint64(&f.frozen) // compatible freezer minItems := uint64(math.MaxUint64) for name, disableSnappy := range chainFreezerNoSnappy { @@ -96,19 +97,14 @@ func (f *prunedfreezer) repair(datadir string) error { table.Close() } - // If minItems is non-zero, it indicates that the chain freezer was previously enabled, and we should use minItems as the current frozen value. - // If minItems is zero, it indicates that the pruneAncient was previously enabled, and we should continue using frozen - // (retrieved from CurrentAncientFreezer) as the current frozen value. - offset := minItems - if offset == 0 { - // no item in ancientDB, init `offset` to the `f.frozen` - offset = atomic.LoadUint64(&f.frozen) - } - log.Info("Read ancientdb item counts", "items", minItems, "offset", offset) + // If the dataset has undergone a prune block, the offset is a non-zero value, otherwise the offset is a zero value. 
+ // The minItems is the value relative to offset + offset += minItems // FrozenOfAncientFreezer is the progress of the last prune-freezer freeze. frozenInDB := ReadFrozenOfAncientFreezer(f.db) maxOffset := max(offset, frozenInDB) + log.Info("Read ancient db item counts", "items", minItems, "frozen", maxOffset) atomic.StoreUint64(&f.frozen, maxOffset) if err := f.Sync(); err != nil { @@ -161,12 +157,12 @@ func (f *prunedfreezer) AncientOffSet() uint64 { // MigrateTable processes the entries in a given table in sequence // converting them to a new format if they're of an old format. -func (db *prunedfreezer) MigrateTable(kind string, convert convertLegacyFn) error { +func (f *prunedfreezer) MigrateTable(kind string, convert convertLegacyFn) error { return errNotSupported } // AncientDatadir returns an error as we don't have a backing chain freezer. -func (db *prunedfreezer) AncientDatadir() (string, error) { +func (f *prunedfreezer) AncientDatadir() (string, error) { return "", errNotSupported } diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go index 714aa16115..83a2aa491a 100644 --- a/core/systemcontracts/upgrade.go +++ b/core/systemcontracts/upgrade.go @@ -4,10 +4,10 @@ import ( "encoding/hex" "fmt" "math/big" + "reflect" "strings" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/systemcontracts/bohr" "github.com/ethereum/go-ethereum/core/systemcontracts/bruno" "github.com/ethereum/go-ethereum/core/systemcontracts/euler" @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/core/systemcontracts/planck" "github.com/ethereum/go-ethereum/core/systemcontracts/plato" "github.com/ethereum/go-ethereum/core/systemcontracts/ramanujan" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -40,7 +41,7 @@ type Upgrade struct { Configs []*UpgradeConfig } -type upgradeHook func(blockNumber *big.Int, contractAddr common.Address, statedb *state.StateDB) error +type upgradeHook func(blockNumber *big.Int, contractAddr common.Address, statedb vm.StateDB) error const ( mainNet = "Mainnet" @@ -789,10 +790,11 @@ func init() { } } -func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, lastBlockTime uint64, blockTime uint64, statedb *state.StateDB) { - if config == nil || blockNumber == nil || statedb == nil { +func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, lastBlockTime uint64, blockTime uint64, statedb vm.StateDB) { + if config == nil || blockNumber == nil || statedb == nil || reflect.ValueOf(statedb).IsNil() { return } + var network string switch GenesisHash { /* Add mainnet genesis hash */ @@ -876,7 +878,7 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I */ } -func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb *state.StateDB, logger log.Logger) { +func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb vm.StateDB, logger log.Logger) { if upgrade == nil { logger.Info("Empty upgrade config", "height", blockNumber.String()) return diff --git a/core/systemcontracts/upgrade_test.go b/core/systemcontracts/upgrade_test.go index 1d8270fd94..3f88d7687b 100644 --- a/core/systemcontracts/upgrade_test.go +++ b/core/systemcontracts/upgrade_test.go @@ -2,9 +2,13 @@ package systemcontracts import ( "crypto/sha256" + "math/big" "testing" "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -39,3 +43,31 @@ func TestAllCodesHash(t *testing.T) { allCodeHash := sha256.Sum256(allCodes) require.Equal(t, allCodeHash[:], common.Hex2Bytes("833cc0fc87c46ad8a223e44ccfdc16a51a7e7383525136441bd0c730f06023df")) } + +func TestUpgradeBuildInSystemContractNilInterface(t *testing.T) { + var ( + config = params.BSCChainConfig + blockNumber = big.NewInt(37959559) + lastBlockTime uint64 = 1713419337 + blockTime uint64 = 1713419340 + statedb vm.StateDB + ) + + GenesisHash = params.BSCGenesisHash + + UpgradeBuildInSystemContract(config, blockNumber, lastBlockTime, blockTime, statedb) +} + +func TestUpgradeBuildInSystemContractNilValue(t *testing.T) { + var ( + config = params.BSCChainConfig + blockNumber = big.NewInt(37959559) + lastBlockTime uint64 = 1713419337 + blockTime uint64 = 1713419340 + statedb vm.StateDB = (*state.StateDB)(nil) + ) + + GenesisHash = params.BSCGenesisHash + + UpgradeBuildInSystemContract(config, blockNumber, lastBlockTime, blockTime, statedb) +} diff --git a/core/types/blob_sidecar.go b/core/types/blob_sidecar.go index d4f63602d0..a97d1ed406 100644 --- a/core/types/blob_sidecar.go +++ b/core/types/blob_sidecar.go @@ -2,10 +2,12 @@ package types import ( "bytes" + "encoding/json" "errors" "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rlp" ) @@ -53,3 +55,40 @@ func (s *BlobSidecar) SanityCheck(blockNumber *big.Int, blockHash common.Hash) e } return nil } + +func (s *BlobSidecar) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{ + "blockHash": s.BlockHash, + "blockNumber": hexutil.EncodeUint64(s.BlockNumber.Uint64()), + "txHash": s.TxHash, + "txIndex": hexutil.EncodeUint64(s.TxIndex), + } + fields["blobSidecar"] = s.BlobTxSidecar + return json.Marshal(fields) +} + +func (s *BlobSidecar) UnmarshalJSON(input []byte) error { + type blobSidecar struct { + BlobSidecar BlobTxSidecar `json:"blobSidecar"` + BlockNumber *hexutil.Big `json:"blockNumber"` + BlockHash common.Hash `json:"blockHash"` + TxIndex *hexutil.Big `json:"txIndex"` + TxHash common.Hash `json:"txHash"` + } + var blob blobSidecar + if err := json.Unmarshal(input, &blob); err != nil { + return err + } + s.BlobTxSidecar = blob.BlobSidecar + if blob.BlockNumber == nil { + return errors.New("missing required field 'blockNumber' for BlobSidecar") + } + s.BlockNumber = blob.BlockNumber.ToInt() + s.BlockHash = blob.BlockHash + if blob.TxIndex == nil { + return errors.New("missing required field 'txIndex' for BlobSidecar") + } + s.TxIndex = blob.TxIndex.ToInt().Uint64() + s.TxHash = blob.TxHash + return nil +} diff --git a/core/types/block.go b/core/types/block.go index c7619f3f76..c0fd3e8a7e 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -19,6 +19,7 @@ package types import ( "encoding/binary" + "encoding/json" "fmt" "io" "math/big" @@ -551,6 +552,14 @@ func (b *Block) WithSidecars(sidecars BlobSidecars) *Block { return block } +func (b *Block) DeepCopySidecars(sidecars BlobSidecars) { + b.sidecars = make(BlobSidecars, len(sidecars)) + if len(sidecars) != 0 { + buffer, _ := json.Marshal(sidecars) + json.Unmarshal(buffer, &b.sidecars) + } +} + // Hash returns the keccak256 hash of b's header. // The hash is computed on the first call and cached thereafter. 
func (b *Block) Hash() common.Hash { diff --git a/core/vote/vote_manager.go b/core/vote/vote_manager.go index 891785482b..35b3412f47 100644 --- a/core/vote/vote_manager.go +++ b/core/vote/vote_manager.go @@ -31,6 +31,7 @@ var notContinuousJustified = metrics.NewRegisteredCounter("votesManager/notConti // Backend wraps all methods required for voting. type Backend interface { IsMining() bool + VoteEnabled() bool EventMux() *event.TypeMux } @@ -136,6 +137,11 @@ func (voteManager *VoteManager) loop() { log.Debug("skip voting because mining is disabled, continue") continue } + if !voteManager.eth.VoteEnabled() { + log.Debug("skip voting because voting is disabled, continue") + continue + } + blockCountSinceMining++ if blockCountSinceMining <= blocksNumberSinceMining { log.Debug("skip voting", "blockCountSinceMining", blockCountSinceMining, "blocksNumberSinceMining", blocksNumberSinceMining) diff --git a/core/vote/vote_pool_test.go b/core/vote/vote_pool_test.go index bb8374e90f..4ccc2f0bae 100644 --- a/core/vote/vote_pool_test.go +++ b/core/vote/vote_pool_test.go @@ -77,6 +77,7 @@ func newTestBackend() *testBackend { return &testBackend{eventMux: new(event.TypeMux)} } func (b *testBackend) IsMining() bool { return true } +func (b *testBackend) VoteEnabled() bool { return true } func (b *testBackend) EventMux() *event.TypeMux { return b.eventMux } func (p *mockPOSA) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) { diff --git a/crypto/bn256/cloudflare/gfp12.go b/crypto/bn256/cloudflare/gfp12.go index 93fb368a7b..4e080f3ad3 100644 --- a/crypto/bn256/cloudflare/gfp12.go +++ b/crypto/bn256/cloudflare/gfp12.go @@ -1,7 +1,7 @@ package bn256 // For details of the algorithms used, see "Multiplication and Squaring on -// Pairing-Friendly Fields, Devegili et al. +// Pairing-Friendly Fields", Devegili et al. // http://eprint.iacr.org/2006/471.pdf. import ( diff --git a/crypto/bn256/cloudflare/gfp2.go b/crypto/bn256/cloudflare/gfp2.go index 90a89e8b47..094fb1460e 100644 --- a/crypto/bn256/cloudflare/gfp2.go +++ b/crypto/bn256/cloudflare/gfp2.go @@ -1,7 +1,7 @@ package bn256 // For details of the algorithms used, see "Multiplication and Squaring on -// Pairing-Friendly Fields, Devegili et al. +// Pairing-Friendly Fields", Devegili et al. // http://eprint.iacr.org/2006/471.pdf. // gfP2 implements a field of size p² as a quadratic extension of the base field diff --git a/crypto/bn256/cloudflare/gfp6.go b/crypto/bn256/cloudflare/gfp6.go index a42734911c..72f552cd35 100644 --- a/crypto/bn256/cloudflare/gfp6.go +++ b/crypto/bn256/cloudflare/gfp6.go @@ -1,7 +1,7 @@ package bn256 // For details of the algorithms used, see "Multiplication and Squaring on -// Pairing-Friendly Fields, Devegili et al. +// Pairing-Friendly Fields", Devegili et al. // http://eprint.iacr.org/2006/471.pdf. // gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ diff --git a/crypto/bn256/google/gfp12.go b/crypto/bn256/google/gfp12.go index f084eddf21..39b407e80a 100644 --- a/crypto/bn256/google/gfp12.go +++ b/crypto/bn256/google/gfp12.go @@ -5,7 +5,7 @@ package bn256 // For details of the algorithms used, see "Multiplication and Squaring on -// Pairing-Friendly Fields, Devegili et al. +// Pairing-Friendly Fields", Devegili et al. // http://eprint.iacr.org/2006/471.pdf. 
import ( diff --git a/crypto/bn256/google/gfp2.go b/crypto/bn256/google/gfp2.go index 3981f6cb4f..9cc854e3f6 100644 --- a/crypto/bn256/google/gfp2.go +++ b/crypto/bn256/google/gfp2.go @@ -5,7 +5,7 @@ package bn256 // For details of the algorithms used, see "Multiplication and Squaring on -// Pairing-Friendly Fields, Devegili et al. +// Pairing-Friendly Fields", Devegili et al. // http://eprint.iacr.org/2006/471.pdf. import ( diff --git a/crypto/bn256/google/gfp6.go b/crypto/bn256/google/gfp6.go index 218856617c..3fe3d344ca 100644 --- a/crypto/bn256/google/gfp6.go +++ b/crypto/bn256/google/gfp6.go @@ -5,7 +5,7 @@ package bn256 // For details of the algorithms used, see "Multiplication and Squaring on -// Pairing-Friendly Fields, Devegili et al. +// Pairing-Friendly Fields", Devegili et al. // http://eprint.iacr.org/2006/471.pdf. import ( diff --git a/eth/api_backend.go b/eth/api_backend.go index d72711929d..6a137155ed 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -441,14 +441,14 @@ func (b *EthAPIBackend) Engine() consensus.Engine { return b.eth.engine } -func (b *EthAPIBackend) CurrentTurnLength() (turnLength uint8, err error) { +func (b *EthAPIBackend) CurrentValidators() ([]common.Address, error) { if p, ok := b.eth.engine.(*parlia.Parlia); ok { service := p.APIs(b.Chain())[0].Service currentHead := rpc.LatestBlockNumber - return service.(*parlia.API).GetTurnLength(¤tHead) + return service.(*parlia.API).GetValidators(¤tHead) } - return 1, nil + return []common.Address{}, errors.New("not supported") } func (b *EthAPIBackend) CurrentHeader() *types.Header { diff --git a/eth/api_miner.go b/eth/api_miner.go index b8d571a475..f187cf7c4d 100644 --- a/eth/api_miner.go +++ b/eth/api_miner.go @@ -20,6 +20,7 @@ import ( "math/big" "time" + "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/common" @@ -73,7 +74,7 @@ func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool { // SetGasLimit sets the gaslimit to target towards during mining. 
func (api *MinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool { api.e.Miner().SetGasCeil(uint64(gasLimit)) - if api.e.Miner().Mining() && uint64(gasLimit) > params.SystemTxsGas { + if uint64(gasLimit) > params.SystemTxsGas { api.e.TxPool().SetMaxGas(uint64(gasLimit) - params.SystemTxsGas) } return true @@ -117,3 +118,37 @@ func (api *MinerAPI) AddBuilder(builder common.Address, url string) error { func (api *MinerAPI) RemoveBuilder(builder common.Address) error { return api.e.APIBackend.RemoveBuilder(builder) } + +func (api *MinerAPI) MBConfig() miner.MBConfig { + return api.e.Miner().MBConfig() +} + +func (api *MinerAPI) ResetMaliciousBehavior() miner.MBConfig { + api.e.Miner().ResetMaliciousBehavior() + return api.e.Miner().MBConfig() +} + +func (api *MinerAPI) SetDoubleSign(on bool) miner.MBConfig { + api.e.Miner().SetDoubleSign(on) + return api.e.Miner().MBConfig() +} + +func (api *MinerAPI) SetVoteDisable(on bool) miner.MBConfig { + api.e.Miner().SetVoteDisable(on) + return api.e.Miner().MBConfig() +} + +func (api *MinerAPI) SetSkipOffsetInturn(offset uint64) miner.MBConfig { + api.e.Miner().SetSkipOffsetInturn(offset) + return api.e.Miner().MBConfig() +} + +func (api *MinerAPI) SetBroadcastDelayBlocks(num uint64) miner.MBConfig { + api.e.Miner().SetBroadcastDelayBlocks(num) + return api.e.Miner().MBConfig() +} + +func (api *MinerAPI) SetLastBlockMiningTime(time uint64) miner.MBConfig { + api.e.Miner().SetLastBlockMiningTime(time) + return api.e.Miner().MBConfig() +} diff --git a/eth/backend.go b/eth/backend.go index 0d434a9130..c0e41b1fcd 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -621,6 +621,7 @@ func (s *Ethereum) StopMining() { } func (s *Ethereum) IsMining() bool { return s.miner.Mining() } +func (s *Ethereum) VoteEnabled() bool { return s.miner.VoteEnabled() } func (s *Ethereum) Miner() *miner.Miner { return s.miner } func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 09925d7d66..8338fd9316 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -327,7 +327,7 @@ func (d *Downloader) UnregisterPeer(id string) error { // LegacySync tries to sync up our local blockchain with a remote peer, both // adding various sanity checks and wrapping it with various log entries. -func (d *Downloader) LegacySync(id string, head common.Hash, td *big.Int, ttd *big.Int, mode SyncMode) error { +func (d *Downloader) LegacySync(id string, head common.Hash, name string, td *big.Int, ttd *big.Int, mode SyncMode) error { err := d.synchronise(id, head, td, ttd, mode, false, nil) switch err { @@ -337,7 +337,7 @@ func (d *Downloader) LegacySync(id string, head common.Hash, td *big.Int, ttd *b if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { - log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) + log.Warn("Synchronisation failed, dropping peer", "peer", id, "name", name, "td", td, "err", err) if d.dropPeer == nil { // The dropPeer method is nil when `--copydb` is used for a local copy. // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 3c113b9134..0a007644d2 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -902,7 +902,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Simulate a synchronisation and check the required result tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync) + tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), "", big.NewInt(1000), nil, FullSync) if _, ok := tester.peers[id]; !ok != tt.drop { t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index fa5a324984..19f8c1ffb8 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -868,9 +868,9 @@ func (f *BlockFetcher) importHeaders(op *blockOrHeaderInject) { parent := f.getHeader(header.ParentHash) if parent == nil { log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash) - time.Sleep(reQueueBlockTimeout) // forget block first, then re-queue f.done <- hash + time.Sleep(reQueueBlockTimeout) f.requeue <- op return } @@ -909,9 +909,9 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) { parent := f.getBlock(block.ParentHash()) if parent == nil { log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash()) - time.Sleep(reQueueBlockTimeout) // forget block first, then re-queue f.done <- hash + time.Sleep(reQueueBlockTimeout) f.requeue <- op return } diff --git a/eth/handler.go b/eth/handler.go index 23dba9e14d..f26162024b 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -321,21 +321,14 @@ func newHandler(config *handlerConfig) (*handler, error) { broadcastBlockWithCheck := func(block *types.Block, propagate bool) { if propagate { - checkErrs := make(chan error, 2) - - go func() { - checkErrs <- core.ValidateListsInBody(block) - }() - go func() { - checkErrs <- core.IsDataAvailable(h.chain, block) - }() - - for i := 0; i < cap(checkErrs); i++ { - err := <-checkErrs - if err != nil { - log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err) - return - } + if !(block.Header().WithdrawalsHash == nil && block.Withdrawals() == nil) && + !(block.Header().EmptyWithdrawalsHash() && block.Withdrawals() != nil && len(block.Withdrawals()) == 0) { + log.Error("Propagated block has invalid withdrawals") + return + } + if err := core.IsDataAvailable(h.chain, block); err != nil { + log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err) + return } } h.BroadcastBlock(block, propagate) @@ -483,13 +476,13 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { h.peersPerIP[remoteIP] = h.peersPerIP[remoteIP] + 1 h.peerPerIPLock.Unlock() } - peer.Log().Debug("Ethereum peer connected", "name", peer.Name()) // Register the peer locally if err := h.peers.registerPeer(peer, snap, trust, bsc); err != nil { peer.Log().Error("Ethereum peer registration failed", "err", err) return err } + peer.Log().Debug("Ethereum peer connected", "name", peer.Name(), "peers.len", h.peers.len()) defer h.unregisterPeer(peer.ID()) p := 
h.peers.peer(peer.ID()) @@ -632,7 +625,7 @@ func (h *handler) runBscExtension(peer *bsc.Peer, handler bsc.Handler) error { bsc.EgressRegistrationErrorMeter.Mark(1) } } - peer.Log().Error("Bsc extension registration failed", "err", err) + peer.Log().Error("Bsc extension registration failed", "err", err, "name", peer.Name()) return err } return handler(peer) diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index d1e07df25c..46bc97fbb8 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -46,7 +47,7 @@ var ProtocolVersions = []uint{ETH68} var protocolLengths = map[uint]uint64{ETH68: 17} // maxMessageSize is the maximum cap on the size of a protocol message. -const maxMessageSize = 10 * 1024 * 1024 +var maxMessageSize = params.MaxMessageSize const ( StatusMsg = 0x00 diff --git a/eth/sync.go b/eth/sync.go index 3b04d09920..db0ec62617 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -233,7 +233,7 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) { // doSync synchronizes the local blockchain with a remote peer. func (h *handler) doSync(op *chainSyncOp) error { // Run the sync cycle, and disable snap sync if we're past the pivot block - err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode) + err := h.downloader.LegacySync(op.peer.ID(), op.head, op.peer.Name(), op.td, h.chain.Config().TerminalTotalDifficulty, op.mode) if err != nil { return err } @@ -248,6 +248,9 @@ func (h *handler) doSync(op *chainSyncOp) error { // degenerate connectivity, but it should be healthy for the mainnet too to // more reliably update peers or the local TD state. if block := h.chain.GetBlock(head.Hash(), head.Number.Uint64()); block != nil { + if h.chain.Config().IsCancun(block.Number(), block.Time()) { + block = block.WithSidecars(h.chain.GetSidecarsByHash(block.Hash())) + } h.BroadcastBlock(block, false) } } diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index bf26c79aa8..8cf50eb528 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -131,8 +131,8 @@ func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumb } // BlobSidecars return the Sidecars of a given block number or hash. 
-func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.BlobTxSidecar, error) { - var r []*types.BlobTxSidecar +func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.BlobSidecar, error) { + var r []*types.BlobSidecar err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecars", blockNrOrHash.String()) if err == nil && r == nil { return nil, ethereum.NotFound @@ -141,8 +141,8 @@ func (ec *Client) BlobSidecars(ctx context.Context, blockNrOrHash rpc.BlockNumbe } // BlobSidecarByTxHash return a sidecar of a given blob transaction -func (ec *Client) BlobSidecarByTxHash(ctx context.Context, hash common.Hash) (*types.BlobTxSidecar, error) { - var r *types.BlobTxSidecar +func (ec *Client) BlobSidecarByTxHash(ctx context.Context, hash common.Hash) (*types.BlobSidecar, error) { + var r *types.BlobSidecar err := ec.c.CallContext(ctx, &r, "eth_getBlobSidecarByTxHash", hash) if err == nil && r == nil { return nil, ethereum.NotFound diff --git a/go.mod b/go.mod index 153f7770ab..55c619ba3b 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,6 @@ require ( github.com/fatih/color v1.16.0 github.com/fatih/structs v1.1.0 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e - github.com/fjl/memsize v0.0.2 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 diff --git a/go.sum b/go.sum index 7e8baebe66..e6940c07b4 100644 --- a/go.sum +++ b/go.sum @@ -335,8 +335,6 @@ github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 h1:6dVcS0LktRSyEE github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= -github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= -github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= diff --git a/internal/debug/flags.go b/internal/debug/flags.go index dac878a7b1..29f7142587 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -30,7 +30,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics/exp" - "github.com/fjl/memsize/memsizeui" "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "github.com/urfave/cli/v2" @@ -38,8 +37,6 @@ import ( "gopkg.in/natefinch/lumberjack.v2" ) -var Memsize memsizeui.Handler - var ( verbosityFlag = &cli.IntFlag{ Name: "verbosity", @@ -313,7 +310,6 @@ func StartPProf(address string, withMetrics bool) { if withMetrics { exp.Exp(metrics.DefaultRegistry) } - http.Handle("/memsize/", http.StripPrefix("/memsize", &Memsize)) log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address)) go func() { if err := http.ListenAndServe(address, nil); err != nil { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 1c81ba544c..6a2ac19319 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -862,54 +862,72 @@ 
func (s *BlockChainAPI) Health() bool { return true } -// GetFinalizedHeader returns the requested finalized block header. -// - probabilisticFinalized should be in range [2,21], -// then the block header with number `max(fastFinalized, latest-probabilisticFinalized)` is returned -func (s *BlockChainAPI) GetFinalizedHeader(ctx context.Context, probabilisticFinalized int64) (map[string]interface{}, error) { - if probabilisticFinalized < 2 || probabilisticFinalized > 21 { - return nil, fmt.Errorf("%d out of range [2,21]", probabilisticFinalized) +func (s *BlockChainAPI) getFinalizedNumber(ctx context.Context, verifiedValidatorNum int64) (int64, error) { + parliaConfig := s.b.ChainConfig().Parlia + if parliaConfig == nil { + return 0, fmt.Errorf("only parlia engine supported") } - currentTurnLength, err := s.b.CurrentTurnLength() + curValidators, err := s.b.CurrentValidators() if err != nil { // impossible - return nil, err + return 0, err } + valLen := int64(len(curValidators)) + if verifiedValidatorNum < 1 || verifiedValidatorNum > valLen { + return 0, fmt.Errorf("%d out of range [1,%d]", verifiedValidatorNum, valLen) + } + fastFinalizedHeader, err := s.b.HeaderByNumber(ctx, rpc.FinalizedBlockNumber) if err != nil { // impossible - return nil, err + return 0, err } + latestHeader, err := s.b.HeaderByNumber(ctx, rpc.LatestBlockNumber) if err != nil { // impossible - return nil, err + return 0, err + } + lastHeader := latestHeader + confirmedValSet := make(map[common.Address]struct{}, valLen) + confirmedValSet[lastHeader.Coinbase] = struct{}{} + for count := 1; int64(len(confirmedValSet)) < verifiedValidatorNum && count <= int(parliaConfig.Epoch) && lastHeader.Number.Int64() > max(fastFinalizedHeader.Number.Int64(), 1); count++ { + lastHeader, err = s.b.HeaderByHash(ctx, lastHeader.ParentHash) + if err != nil { // impossible + return 0, err + } + confirmedValSet[lastHeader.Coinbase] = struct{}{} } - finalizedBlockNumber := max(fastFinalizedHeader.Number.Int64(), latestHeader.Number.Int64()-probabilisticFinalized*int64(currentTurnLength)) - return s.GetHeaderByNumber(ctx, rpc.BlockNumber(finalizedBlockNumber)) -} + finalizedBlockNumber := max(fastFinalizedHeader.Number.Int64(), lastHeader.Number.Int64()) + log.Debug("getFinalizedNumber", "LatestBlockNumber", latestHeader.Number.Int64(), "fastFinalizedHeight", fastFinalizedHeader.Number.Int64(), + "lastHeader", lastHeader.Number.Int64(), "finalizedBlockNumber", finalizedBlockNumber, "len(confirmedValSet)", len(confirmedValSet)) -// GetFinalizedBlock returns the requested finalized block. -// - probabilisticFinalized should be in range [2,21], -// then the block with number `max(fastFinalized, latest-probabilisticFinalized)` is returned -// - When fullTx is true all transactions in the block are returned, otherwise -// only the transaction hash is returned. -func (s *BlockChainAPI) GetFinalizedBlock(ctx context.Context, probabilisticFinalized int64, fullTx bool) (map[string]interface{}, error) { - if probabilisticFinalized < 2 || probabilisticFinalized > 21 { - return nil, fmt.Errorf("%d out of range [2,21]", probabilisticFinalized) - } + return finalizedBlockNumber, nil +} - currentTurnLength, err := s.b.CurrentTurnLength() - if err != nil { // impossible - return nil, err - } - fastFinalizedHeader, err := s.b.HeaderByNumber(ctx, rpc.FinalizedBlockNumber) +// GetFinalizedHeader returns the finalized block header based on the specified parameters. +// - `verifiedValidatorNum` must be within the range [1, len(currentValidators)]. 
+// - The function calculates `probabilisticFinalizedHeight` as the highest height of the block verified by `verifiedValidatorNum` validators, +// it then returns the block header with a height equal to `max(fastFinalizedHeight, probabilisticFinalizedHeight)`. +// - The height of the returned block header is guaranteed to be monotonically increasing. +func (s *BlockChainAPI) GetFinalizedHeader(ctx context.Context, verifiedValidatorNum int64) (map[string]interface{}, error) { + finalizedBlockNumber, err := s.getFinalizedNumber(ctx, verifiedValidatorNum) if err != nil { // impossible return nil, err } - latestHeader, err := s.b.HeaderByNumber(ctx, rpc.LatestBlockNumber) + return s.GetHeaderByNumber(ctx, rpc.BlockNumber(finalizedBlockNumber)) +} + +// GetFinalizedBlock returns the finalized block based on the specified parameters. +// - `verifiedValidatorNum` must be within the range [1, len(currentValidators)]. +// - The function calculates `probabilisticFinalizedHeight` as the highest height of the block verified by `verifiedValidatorNum` validators, +// it then returns the block with a height equal to `max(fastFinalizedHeight, probabilisticFinalizedHeight)`. +// - If `fullTx` is true, the block includes all transactions; otherwise, only transaction hashes are included. +// - The height of the returned block is guaranteed to be monotonically increasing. +func (s *BlockChainAPI) GetFinalizedBlock(ctx context.Context, verifiedValidatorNum int64, fullTx bool) (map[string]interface{}, error) { + finalizedBlockNumber, err := s.getFinalizedNumber(ctx, verifiedValidatorNum) if err != nil { // impossible return nil, err } - finalizedBlockNumber := max(fastFinalizedHeader.Number.Int64(), latestHeader.Number.Int64()-probabilisticFinalized*int64(currentTurnLength)) return s.GetBlockByNumber(ctx, rpc.BlockNumber(finalizedBlockNumber), fullTx) } diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index ac467c352a..8f1b8e3b0f 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -631,9 +631,9 @@ func (b testBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transactio func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription { panic("implement me") } -func (b testBackend) ChainConfig() *params.ChainConfig { return b.chain.Config() } -func (b testBackend) Engine() consensus.Engine { return b.chain.Engine() } -func (b testBackend) CurrentTurnLength() (uint8, error) { return 1, nil } +func (b testBackend) ChainConfig() *params.ChainConfig { return b.chain.Config() } +func (b testBackend) Engine() consensus.Engine { return b.chain.Engine() } +func (b testBackend) CurrentValidators() ([]common.Address, error) { return []common.Address{}, nil } func (b testBackend) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { panic("implement me") } diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 79492cda85..0cb3fb25cd 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -89,8 +89,8 @@ type Backend interface { ChainConfig() *params.ChainConfig Engine() consensus.Engine - // CurrentTurnLength return the turnLength at the latest block - CurrentTurnLength() (uint8, error) + // CurrentValidators return the list of validator at the latest block + CurrentValidators() ([]common.Address, error) // This is copied from filters.Backend // eth/filters needs to be initialized from this backend type, so methods needed by diff --git 
a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go index 17bbc49c5a..cc00e839d6 100644 --- a/internal/ethapi/transaction_args_test.go +++ b/internal/ethapi/transaction_args_test.go @@ -416,7 +416,7 @@ func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) func (b *backendMock) Engine() consensus.Engine { return nil } -func (b *backendMock) CurrentTurnLength() (uint8, error) { return 1, nil } +func (b *backendMock) CurrentValidators() ([]common.Address, error) { return []common.Address{}, nil } func (b *backendMock) MevRunning() bool { return false } func (b *backendMock) HasBuilder(builder common.Address) bool { return false } diff --git a/miner/bid_simulator.go b/miner/bid_simulator.go index f96d3c1a8a..498f1ec2b2 100644 --- a/miner/bid_simulator.go +++ b/miner/bid_simulator.go @@ -692,6 +692,14 @@ func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) { return } + // check bid size + if bidRuntime.env.size+blockReserveSize > params.MaxMessageSize { + log.Error("BidSimulator: failed to check bid size", "builder", bidRuntime.bid.Builder, + "bidHash", bidRuntime.bid.Hash(), "env.size", bidRuntime.env.size) + err = errors.New("invalid bid size") + return + } + bestBid := b.GetBestBid(parentHash) if bestBid == nil { log.Info("[BID RESULT]", "win", "true[first]", "builder", bidRuntime.bid.Builder, "hash", bidRuntime.bid.Hash().TerminalString()) @@ -858,6 +866,7 @@ func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *para } r.env.tcount++ + r.env.size += uint32(tx.Size()) return nil } diff --git a/miner/gen_mb_config.go b/miner/gen_mb_config.go new file mode 100644 index 0000000000..32e2a1464f --- /dev/null +++ b/miner/gen_mb_config.go @@ -0,0 +1,52 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package miner + +// MarshalTOML marshals as TOML. +func (m MBConfig) MarshalTOML() (interface{}, error) { + type MBConfig struct { + DoubleSign bool + VoteDisable bool + SkipOffsetInturn *uint64 `toml:",omitempty"` + BroadcastDelayBlocks uint64 + LastBlockMiningTime uint64 + } + var enc MBConfig + enc.DoubleSign = m.DoubleSign + enc.VoteDisable = m.VoteDisable + enc.SkipOffsetInturn = m.SkipOffsetInturn + enc.BroadcastDelayBlocks = m.BroadcastDelayBlocks + enc.LastBlockMiningTime = m.LastBlockMiningTime + return &enc, nil +} + +// UnmarshalTOML unmarshals from TOML. 
+func (m *MBConfig) UnmarshalTOML(unmarshal func(interface{}) error) error { + type MBConfig struct { + DoubleSign *bool + VoteDisable *bool + SkipOffsetInturn *uint64 `toml:",omitempty"` + BroadcastDelayBlocks *uint64 + LastBlockMiningTime *uint64 + } + var dec MBConfig + if err := unmarshal(&dec); err != nil { + return err + } + if dec.DoubleSign != nil { + m.DoubleSign = *dec.DoubleSign + } + if dec.VoteDisable != nil { + m.VoteDisable = *dec.VoteDisable + } + if dec.SkipOffsetInturn != nil { + m.SkipOffsetInturn = dec.SkipOffsetInturn + } + if dec.BroadcastDelayBlocks != nil { + m.BroadcastDelayBlocks = *dec.BroadcastDelayBlocks + } + if dec.LastBlockMiningTime != nil { + m.LastBlockMiningTime = *dec.LastBlockMiningTime + } + return nil +} diff --git a/miner/malicious_behaviour.go b/miner/malicious_behaviour.go new file mode 100644 index 0000000000..13aad5542b --- /dev/null +++ b/miner/malicious_behaviour.go @@ -0,0 +1,22 @@ +package miner + +//go:generate go run github.com/fjl/gencodec -type MBConfig -formats toml -out gen_mb_config.go +type MBConfig struct { + // Generate two consecutive blocks for the same parent block + DoubleSign bool + // Disable voting for Fast Finality + VoteDisable bool + // Skip block production for in-turn validators at a specified offset + SkipOffsetInturn *uint64 `toml:",omitempty"` + // Delay broadcasting mined blocks by a specified number of blocks, only for in turn validators + BroadcastDelayBlocks uint64 + // Mining time (milliseconds) for the last block in every turn + LastBlockMiningTime uint64 +} + +var DefaultMBConfig = MBConfig{ + DoubleSign: false, + VoteDisable: false, + BroadcastDelayBlocks: 0, + LastBlockMiningTime: 0, +} diff --git a/miner/miner.go b/miner/miner.go index aaef07932d..d40dcc9c21 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -58,11 +58,12 @@ type Config struct { DisableVoteAttestation bool // Whether to skip assembling vote attestation Mev MevConfig // Mev configuration + MB MBConfig // Malicious behavior configuration } // DefaultConfig contains default settings for miner. var DefaultConfig = Config{ - GasCeil: 30000000, + GasCeil: 0, GasPrice: big.NewInt(params.GWei), // The default recommit time is chosen as two seconds since @@ -74,6 +75,7 @@ var DefaultConfig = Config{ DelayLeftOver: 50 * time.Millisecond, Mev: DefaultMevConfig, + MB: DefaultMBConfig, } // Miner creates blocks and searches for proof-of-work values. 
@@ -202,6 +204,10 @@ func (miner *Miner) Mining() bool { return miner.worker.isRunning() } +func (miner *Miner) VoteEnabled() bool { + return miner.worker.config.VoteEnable && !miner.worker.config.MB.VoteDisable +} + func (miner *Miner) InTurn() bool { return miner.worker.inTurn() } @@ -285,6 +291,34 @@ func (miner *Miner) SetGasCeil(ceil uint64) { miner.worker.setGasCeil(ceil) } +func (miner *Miner) MBConfig() MBConfig { + return miner.worker.config.MB +} + +func (miner *Miner) ResetMaliciousBehavior() { + miner.worker.config.MB = DefaultMBConfig +} + +func (miner *Miner) SetDoubleSign(on bool) { + miner.worker.config.MB.DoubleSign = on +} + +func (miner *Miner) SetVoteDisable(on bool) { + miner.worker.config.MB.VoteDisable = on +} + +func (miner *Miner) SetSkipOffsetInturn(offset uint64) { + miner.worker.config.MB.SkipOffsetInturn = &offset +} + +func (miner *Miner) SetBroadcastDelayBlocks(num uint64) { + miner.worker.config.MB.BroadcastDelayBlocks = num +} + +func (miner *Miner) SetLastBlockMiningTime(time uint64) { + miner.worker.config.MB.LastBlockMiningTime = time +} + // SubscribePendingLogs starts delivering logs from pending transactions // to the given channel. func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription { diff --git a/miner/worker.go b/miner/worker.go index 8f09819d7d..e67f230e2a 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -43,6 +43,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" ) @@ -70,6 +71,12 @@ const ( // the default to wait for the mev miner to finish waitMEVMinerEndTimeLimit = 50 * time.Millisecond + + // Reserve block size for the following 3 components: + // a. System transactions at the end of the block + // b. Seal in the block header + // c. Overhead from RLP encoding + blockReserveSize = 100 * 1024 ) var ( @@ -89,6 +96,7 @@ type environment struct { signer types.Signer state *state.StateDB // apply state changes here tcount int // tx count in cycle + size uint32 // almost accurate block size, gasPool *core.GasPool // available gas used to pack transactions coinbase common.Address @@ -105,6 +113,7 @@ func (env *environment) copy() *environment { signer: env.signer, state: env.state.Copy(), tcount: env.tcount, + size: env.size, coinbase: env.coinbase, header: types.CopyHeader(env.header), receipts: copyReceipts(env.receipts), @@ -246,6 +255,10 @@ type worker struct { fullTaskHook func() // Method to call before pushing the full sealing task. resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. recentMinedBlocks *lru.Cache + + // Test purpose + delayedBlocksForBroadcast []*types.Block + delayedMu sync.RWMutex } func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker { @@ -300,6 +313,8 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus go worker.newWorkLoop(recommit) go worker.resultLoop() go worker.taskLoop() + worker.wg.Add(1) + go worker.delayBlocksBroadcastLoop() // Submit first work to initialize pending state. if init { @@ -662,6 +677,7 @@ func (w *worker) resultLoop() { w.recentMinedBlocks.Add(block.NumberU64(), []common.Hash{block.ParentHash()}) } + inturn := w.inTurn() // Commit block and state to database. 
task.state.SetExpectedStateRoot(block.Root()) start := time.Now() @@ -677,7 +693,29 @@ func (w *worker) resultLoop() { writeBlockTimer.UpdateSince(start) log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash, "elapsed", common.PrettyDuration(time.Since(task.createdAt))) - w.mux.Post(core.NewMinedBlockEvent{Block: block}) + w.postBlock(block, inturn) + if p, ok := w.engine.(*parlia.Parlia); ok { + if w.config.MB.DoubleSign { + shadowHeader := block.Header() + shadowHeader.Extra[0] = 'd' + shadowHeader.Extra[1] = 's' + shadowBlock := types.NewBlockWithHeader(shadowHeader).WithBody(block.Transactions(), block.Uncles()).WithWithdrawals(block.Withdrawals()) + shadowBlock.DeepCopySidecars(block.Sidecars()) + shadowBlock, err := p.AssembleSignature(shadowBlock) + if err == nil { + w.postBlock(shadowBlock, inturn) + sealhash := w.engine.SealHash(shadowBlock.Header()) + hash := shadowBlock.Hash() + log.Info("Successfully sealed new block", "number", shadowBlock.Number(), "sealhash", sealhash, "hash", hash, + "elapsed", common.PrettyDuration(time.Since(task.createdAt))) + if len(block.Sidecars()) != 0 { + log.Debug("show sidecars", "block.Sidecars()[0].BlockHash", block.Sidecars()[0].BlockHash, "shadowBlock.Sidecars()[0].BlockHash", shadowBlock.Sidecars()[0].BlockHash) + } + } else { + log.Info("Failed to AssembleSignature", "err", err) + } + } + } case <-w.exitCh: return @@ -685,6 +723,63 @@ func (w *worker) resultLoop() { } } +func (w *worker) postBlock(block *types.Block, inTurn bool) { + if w.config.MB.BroadcastDelayBlocks > 0 && inTurn { + w.delayedMu.Lock() + w.delayedBlocksForBroadcast = append(w.delayedBlocksForBroadcast, block) + w.delayedMu.Unlock() + } else { + w.mux.Post(core.NewMinedBlockEvent{Block: block}) + } +} +func (w *worker) delayBlocksBroadcastLoop() { + defer w.wg.Done() + + for { + if len(w.delayedBlocksForBroadcast) > 0 && w.config.MB.BroadcastDelayBlocks > 0 { + w.delayedMu.Lock() + + currentBlock := w.chain.CurrentBlock() + currentBlockNum := currentBlock.Number.Uint64() + + delayTime := (w.config.MB.BroadcastDelayBlocks - 1) * w.chainConfig.Parlia.Period + if p, ok := w.engine.(*parlia.Parlia); ok { + service := p.APIs(w.chain)[0].Service + latestBlockNumber := rpc.LatestBlockNumber + currentTurnLength, err := service.(*parlia.API).GetTurnLength(&latestBlockNumber) + nonInTurnBackoff := w.config.MB.BroadcastDelayBlocks + if err == nil { + if w.config.MB.BroadcastDelayBlocks > uint64(currentTurnLength) { + // suppose extra blocks are generated by in turn validators + nonInTurnBackoff = uint64(currentTurnLength) + } + } + delayTime += nonInTurnBackoff + } + + firstBlock := w.delayedBlocksForBroadcast[0] + if uint64(time.Now().Unix()) >= (firstBlock.Time() + delayTime) { + time.Sleep(500 * time.Microsecond) + for _, block := range w.delayedBlocksForBroadcast { + w.mux.Post(core.NewMinedBlockEvent{Block: block}) + log.Info("delayBlocksBroadcastLoop", "number", block.Number(), "hash", block.Hash(), + "time", block.Time(), "now", uint64(time.Now().Unix()), "currentBlockNum", currentBlockNum) + } + w.delayedBlocksForBroadcast = make([]*types.Block, 0) + } + + w.delayedMu.Unlock() + } + + select { + case <-w.exitCh: + return + default: + time.Sleep(100 * time.Millisecond) + } + } +} + // makeEnv creates a new environment for the sealing block. 
func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, prevEnv *environment) (*environment, error) { @@ -895,6 +990,13 @@ LOOP: txs.Pop() continue } + // If we don't have enough size left for the next transaction, skip it. + if env.size+uint32(tx.Size())+blockReserveSize > params.MaxMessageSize { + log.Trace("Not enough size left for transaction", "hash", ltx.Hash, + "env.size", env.size, "needed", uint32(tx.Size())) + txs.Pop() + continue + } // Error may be ignored here. The error has already been checked // during transaction acceptance is the transaction pool. from, _ := types.Sender(env.signer, tx) @@ -920,6 +1022,7 @@ LOOP: // Everything ok, collect the logs and shift in the next transaction from the same account coalescedLogs = append(coalescedLogs, logs...) env.tcount++ + env.size += uint32(tx.Size()) // size of BlobTxSidecar included txs.Shift() default: @@ -1055,6 +1158,9 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{}) core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state) } + + env.size = uint32(env.header.Size()) + return env, nil } @@ -1189,6 +1295,25 @@ func (w *worker) commitWork(interruptCh chan int32, timestamp int64) { return } } + if w.config.MB.SkipOffsetInturn != nil { + if w.inTurn() { + if p, ok := w.engine.(*parlia.Parlia); ok { + service := p.APIs(w.chain)[0].Service + latestBlockNumber := rpc.LatestBlockNumber + currentTurnLength, err := service.(*parlia.API).GetTurnLength(&latestBlockNumber) + if err == nil { + currentHeader := w.chain.CurrentBlock() + blockToMine := currentHeader.Number.Uint64() + 1 + if *w.config.MB.SkipOffsetInturn == blockToMine%uint64(currentTurnLength) { + log.Info("skip commitWork", "blockNumber", blockToMine) + return + } + } else { + log.Error("commitWork|GetTurnLength", "err", err) + } + } + } + } stopTimer := time.NewTimer(0) defer stopTimer.Stop() @@ -1227,6 +1352,20 @@ LOOP: workList = append(workList, work) delay := w.engine.Delay(w.chain, work.header, &w.config.DelayLeftOver) + if p, ok := w.engine.(*parlia.Parlia); ok { + if w.config.MB.LastBlockMiningTime > w.chainConfig.Parlia.Period*1000/2 { + service := p.APIs(w.chain)[0].Service + latestBlockNumber := rpc.LatestBlockNumber + currentTurnLength, err := service.(*parlia.API).GetTurnLength(&latestBlockNumber) + if err == nil && (work.header.Number.Uint64()+1)%uint64(currentTurnLength) == 0 { + *delay += time.Duration((w.config.MB.LastBlockMiningTime - w.chainConfig.Parlia.Period*1000/2) * uint64(time.Millisecond)) + timeLeft := time.Until(time.Unix(int64(work.header.Time), 0)) + if *delay > timeLeft { + *delay = timeLeft + } + } + } + } if delay == nil { log.Warn("commitWork delay is nil, something is wrong") stopTimer = nil diff --git a/params/config.go b/params/config.go index 2392c643d4..2c73e8dfe7 100644 --- a/params/config.go +++ b/params/config.go @@ -153,8 +153,8 @@ var ( FeynmanFixTime: newUint64(1713419340), // 2024-04-18 05:49:00 AM UTC CancunTime: newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC HaberTime: newUint64(1718863500), // 2024-06-20 06:05:00 AM UTC - HaberFixTime: nil, // TBD - BohrTime: nil, + HaberFixTime: newUint64(1727316120), // 2024-09-26 02:02:00 AM UTC + BohrTime: newUint64(1727317200), // 2024-09-26 02:20:00 AM UTC Parlia: &ParliaConfig{ Period: 3, diff --git a/params/protocol_params.go b/params/protocol_params.go index 65b2d942c1..a032f2759e 100644 --- 
a/params/protocol_params.go +++ b/params/protocol_params.go @@ -29,6 +29,8 @@ const ( GenesisGasLimit uint64 = 4712388 // Gas limit of the Genesis block. PayBidTxGasLimit uint64 = 25000 // Gas limit of the PayBidTx in the types.BidArgs. + MaxMessageSize uint32 = 10 * 1024 * 1024 // MaxMessageSize is the maximum cap on the size of an eth protocol message. + MaximumExtraDataSize uint64 = 32 // Maximum size extra data may be after Genesis. ForkIDSize uint64 = 4 // The length of fork id ExpByteGas uint64 = 10 // Times ceil(log256(exponent)) for the EXP instruction. diff --git a/params/version.go b/params/version.go index 3454448fe5..4fc05b1ed8 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 4 // Minor version component of the current release - VersionPatch = 13 // Patch version component of the current release + VersionPatch = 15 // Patch version component of the current release VersionMeta = "" // Version metadata to append to the version string )
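
What follows are a few illustrative usage sketches for APIs touched by this patch; none of the code below is part of the diff itself, and any endpoint, sample value, or RPC method name not spelled out in the diff is an assumption. First, the prunedfreezer.repair change now always starts from the persisted frozen counter and adds the item count found in the ancient tables, rather than choosing between the two. A minimal sketch of the resulting arithmetic, with shortened names and made-up values:

package main

import "fmt"

// repairedFrozen mirrors the new repair() bookkeeping: the persisted frozen
// counter (non-zero once the dataset has been pruned) and the item count left
// in the ancient tables are added together, then capped from below by the
// last recorded prune-freezer progress.
func repairedFrozen(prevFrozen, ancientItems, frozenInDB uint64) uint64 {
	offset := prevFrozen + ancientItems
	return max(offset, frozenInDB) // max is the Go 1.21 builtin, as used in the diff
}

func main() {
	fmt.Println(repairedFrozen(1_000_000, 250, 1_000_100)) // prints 1000250
}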
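
The ethclient changes switch BlobSidecars and BlobSidecarByTxHash from *types.BlobTxSidecar to the richer *types.BlobSidecar, which also carries the block hash, block number, transaction index, and transaction hash. A minimal sketch of a caller consuming the new return type; the endpoint is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; any node exposing eth_getBlobSidecars will do.
	client, err := ethclient.Dial("http://127.0.0.1:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
	sidecars, err := client.BlobSidecars(context.Background(), latest)
	if err != nil {
		log.Fatal(err) // ethereum.NotFound when the block has no sidecars
	}
	for _, sc := range sidecars {
		// The sidecar now carries block context alongside the raw blobs.
		fmt.Println("tx", sc.TxHash, "index", sc.TxIndex,
			"block", sc.BlockNumber, "blobs", len(sc.BlobTxSidecar.Blobs))
	}
}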
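
GetFinalizedHeader and GetFinalizedBlock now take verifiedValidatorNum in the range [1, len(currentValidators)] instead of the old probabilisticFinalized parameter. A sketch of querying the header variant over raw JSON-RPC, assuming the usual eth namespace mapping (eth_getFinalizedHeader) and a placeholder endpoint:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Highest block already signed by at least 3 distinct validators, clamped
	// from below by the fast-finalized block; per the new doc comments the
	// returned height is monotonically increasing across calls.
	var header map[string]interface{}
	if err := client.CallContext(context.Background(), &header, "eth_getFinalizedHeader", 3); err != nil {
		log.Fatal(err)
	}
	fmt.Println("finalized number:", header["number"], "hash:", header["hash"])
}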
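
The MBConfig knobs are surfaced through new MinerAPI methods (MBConfig, ResetMaliciousBehavior, SetDoubleSign, and friends). A sketch of how a test harness might toggle them over RPC; the miner_* method names assume the standard namespace mapping and are not spelled out in the diff, and the endpoint is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; the node must expose the miner API namespace.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := context.Background()
	var cfg map[string]interface{}

	// Enable double signing for a robustness test run; each setter returns
	// the updated MBConfig.
	if err := client.CallContext(ctx, &cfg, "miner_setDoubleSign", true); err != nil {
		log.Fatal(err)
	}
	fmt.Println("MB config:", cfg)

	// Restore the defaults (all malicious behaviors off) afterwards.
	if err := client.CallContext(ctx, &cfg, "miner_resetMaliciousBehavior"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("MB config after reset:", cfg)
}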
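
Both the worker and the bid simulator enforce the same size budget: the accumulated block size plus a 100 KiB reserve (system transactions, header seal, RLP overhead) must stay within params.MaxMessageSize. A self-contained sketch of that check, with constants mirrored from the diff and made-up sizes:

package main

import "fmt"

const (
	maxMessageSize   = 10 * 1024 * 1024 // mirrors params.MaxMessageSize, the eth protocol message cap
	blockReserveSize = 100 * 1024       // mirrors the miner's reserve for system txs, header seal and RLP overhead
)

// txFits reports whether a transaction of txSize bytes still fits into a block
// whose accumulated size is envSize bytes, matching the check performed before
// each transaction is committed and before a bid is accepted.
func txFits(envSize, txSize uint32) bool {
	return envSize+txSize+blockReserveSize <= maxMessageSize
}

func main() {
	fmt.Println(txFits(9*1024*1024, 128*1024))           // true: plenty of headroom
	fmt.Println(txFits(10*1024*1024-150*1024, 128*1024)) // false: the reserve would be violated
}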