Commit
Merge branch 'main' into som/fix_7702_pectra4
somnathb1 committed Oct 24, 2024
2 parents 81f92bd + f9527b8 commit 3fe6632
Showing 18 changed files with 63 additions and 80 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/qa-rpc-test-bisection-tool.yml
@@ -39,7 +39,7 @@ jobs:
- name: Checkout RPC Tests Repository & Install Requirements
run: |
rm -rf $GITHUB_WORKSPACE/rpc-tests
- git -c advice.detachedHead=false clone --depth 1 --branch v1.00.0 https://github.com/erigontech/rpc-tests $GITHUB_WORKSPACE/rpc-tests
+ git -c advice.detachedHead=false clone --depth 1 --branch v1.0.0 https://github.com/erigontech/rpc-tests $GITHUB_WORKSPACE/rpc-tests
cd $GITHUB_WORKSPACE/rpc-tests
pip3 install -r requirements.txt
10 changes: 10 additions & 0 deletions cl/monitor/metrics.go
@@ -19,6 +19,8 @@ var (
metricProposerHit = metrics.GetOrCreateCounter("validator_proposal_hit")
// metricProposerMiss is the number of proposals that miss for those validators we observe in previous slot
metricProposerMiss = metrics.GetOrCreateCounter("validator_proposal_miss")
+ // aggregateAndProofSignatures is the sum of signatures in all the aggregates in the recent slot
+ aggregateAndProofSignatures = metrics.GetOrCreateGauge("aggregate_and_proof_signatures")

// Block processing metrics
fullBlockProcessingTime = metrics.GetOrCreateGauge("full_block_processing_time")
@@ -122,6 +124,11 @@ func microToMilli(micros int64) float64 {
return float64(micros) / 1000
}

+ // ObserveNumberOfAggregateSignatures adds the number of signatures in the aggregate to the per-slot sum
+ func ObserveNumberOfAggregateSignatures(signatures int) {
+ aggregateAndProofSignatures.Add(float64(signatures))
+ }

// ObserveEpochProcessingTime sets last epoch processing time
func ObserveEpochProcessingTime(startTime time.Time) {
epochProcessingTime.Set(float64(time.Since(startTime).Microseconds()))
@@ -218,6 +225,9 @@ func ObserveActiveValidatorsCount(count int) {
}

func ObserveCurrentSlot(slot uint64) {
+ if currentSlot.GetValueUint64() != slot {
+ aggregateAndProofSignatures.Set(0)
+ }
currentSlot.Set(float64(slot))
}

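The two hunks above work together: ObserveNumberOfAggregateSignatures accumulates into the gauge during a slot, and ObserveCurrentSlot zeroes it when the slot advances. A minimal runnable sketch of this accumulate-then-reset pattern (the `gauge` type below is a hypothetical stand-in for Erigon's metrics package):

```go
package main

import "fmt"

// gauge is a toy stand-in for the metrics gauge type; only the
// operations used in the diff above are modeled.
type gauge struct{ v float64 }

func (g *gauge) Add(d float64)          { g.v += d }
func (g *gauge) Set(v float64)          { g.v = v }
func (g *gauge) GetValueUint64() uint64 { return uint64(g.v) }

var (
	aggregateAndProofSignatures gauge
	currentSlot                 gauge
)

// ObserveNumberOfAggregateSignatures adds an aggregate's signature count
// to the running per-slot sum.
func ObserveNumberOfAggregateSignatures(signatures int) {
	aggregateAndProofSignatures.Add(float64(signatures))
}

// ObserveCurrentSlot resets the sum whenever the slot advances.
func ObserveCurrentSlot(slot uint64) {
	if currentSlot.GetValueUint64() != slot {
		aggregateAndProofSignatures.Set(0)
	}
	currentSlot.Set(float64(slot))
}

func main() {
	ObserveCurrentSlot(100)
	ObserveNumberOfAggregateSignatures(64)
	ObserveNumberOfAggregateSignatures(32)
	fmt.Println(aggregateAndProofSignatures.v) // 96: sum within slot 100
	ObserveCurrentSlot(101)
	fmt.Println(aggregateAndProofSignatures.v) // 0: reset on slot change
}
```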
2 changes: 2 additions & 0 deletions cl/phase1/network/services/aggregate_and_proof_service.go
@@ -172,6 +172,8 @@ func (a *aggregateAndProofServiceImpl) ProcessMessage(
return errors.New("no attesting indicies")
}

+ monitor.ObserveNumberOfAggregateSignatures(len(attestingIndices))

// [REJECT] The aggregator's validator index is within the committee -- i.e. aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, index).
if !slices.Contains(committee, aggregateAndProof.SignedAggregateAndProof.Message.AggregatorIndex) {
return errors.New("committee index not in committee")
1 change: 0 additions & 1 deletion cmd/rpcdaemon/README.md
@@ -324,7 +324,6 @@ The following table shows the current implementation status of Erigon's RPC daemon
| engine_getPayloadV1 | Yes | |
| engine_getPayloadV2 | Yes | |
| engine_getPayloadV3 | Yes | |
- | engine_exchangeTransitionConfigurationV1 | Yes | |
| | | |
| debug_accountRange | Yes | Private Erigon debug module |
| debug_accountAt | Yes | Private Erigon debug module |
2 changes: 1 addition & 1 deletion erigon-lib/kv/mdbx/kv_mdbx.go
@@ -68,7 +68,7 @@ type MdbxOpts struct {
shrinkThreshold int
flags uint
pageSize uint64
- dirtySpace uint64 // if exeed this space, modified pages will `spill` to disk
+ dirtySpace uint64 // if exceed this space, modified pages will `spill` to disk
mergeThreshold uint64
verbosity kv.DBVerbosityLvl
label kv.Label // marker to distinct db instances - one process may open many databases. for example to collect metrics of only 1 database
8 changes: 2 additions & 6 deletions erigon-lib/state/aggregator.go
@@ -1631,12 +1631,7 @@ func (a *Aggregator) BuildFilesInBackground(txNum uint64) chan struct{} {
}

step := a.visibleFilesMinimaxTxNum.Load() / a.StepSize()
- lastInDB := max(
- lastIdInDB(a.db, a.d[kv.AccountsDomain]),
- lastIdInDB(a.db, a.d[kv.CodeDomain]),
- lastIdInDB(a.db, a.d[kv.StorageDomain]),
- lastIdInDBNoHistory(a.db, a.d[kv.CommitmentDomain]))
- log.Info("BuildFilesInBackground", "step", step, "lastInDB", lastInDB)

a.wg.Add(1)
go func() {
defer a.wg.Done()
@@ -1656,6 +1651,7 @@ func (a *Aggregator) BuildFilesInBackground(txNum uint64) chan struct{} {
lastIdInDB(a.db, a.d[kv.CodeDomain]),
lastIdInDB(a.db, a.d[kv.StorageDomain]),
lastIdInDBNoHistory(a.db, a.d[kv.CommitmentDomain]))
+ log.Info("BuildFilesInBackground", "step", step, "lastInDB", lastInDB)

// check if db has enough data (maybe we didn't commit them yet or all keys are unique so history is empty)
//lastInDB := lastIdInDB(a.db, a.d[kv.AccountsDomain])
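The net effect of these two hunks is that lastInDB is computed and logged inside the background goroutine rather than before launching it, so the logged value reflects what the worker actually sees. A toy sketch of the difference (hypothetical values):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	lastInDB := uint64(41) // value at scheduling time
	var wg sync.WaitGroup
	wg.Add(1)
	logged := lastInDB // the old code logged here, before the worker ran
	go func() {
		defer wg.Done()
		lastInDB = 42 // recomputed inside the worker (as the moved code does)
		fmt.Println("BuildFilesInBackground", "lastInDB", lastInDB) // fresh: 42
	}()
	wg.Wait()
	fmt.Println("pre-launch value would have been", logged) // stale: 41
}
```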
11 changes: 7 additions & 4 deletions erigon-lib/state/domain.go
@@ -466,13 +466,16 @@ func (d *Domain) openDirtyFiles() (err error) {
}

func (d *Domain) closeWhatNotInList(fNames []string) {
+ protectFiles := make(map[string]struct{}, len(fNames))
+ for _, f := range fNames {
+ protectFiles[f] = struct{}{}
+ }
var toClose []*filesItem
d.dirtyFiles.Walk(func(items []*filesItem) bool {
- Loop1:
for _, item := range items {
- for _, protectName := range fNames {
- if item.decompressor != nil && item.decompressor.FileName() == protectName {
- continue Loop1
+ if item.decompressor != nil {
+ if _, ok := protectFiles[item.decompressor.FileName()]; ok {
+ continue
}
}
toClose = append(toClose, item)
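This hunk (and the matching ones in history.go, inverted_index.go, block_snapshots.go, and caplin_snapshots.go below) replaces a labeled nested loop, which compared every dirty file against every protected name, with a map[string]struct{} built once up front, so the work drops from O(files × names) string comparisons to O(files + names). A self-contained sketch of the before/after shape:

```go
package main

import "fmt"

// Before: labeled loop, O(len(items) * len(protect)) comparisons.
func keepSlow(items, protect []string) (toClose []string) {
Loop1:
	for _, item := range items {
		for _, p := range protect {
			if item == p {
				continue Loop1
			}
		}
		toClose = append(toClose, item)
	}
	return
}

// After: build a set once, O(len(items) + len(protect)).
func keepFast(items, protect []string) (toClose []string) {
	protectFiles := make(map[string]struct{}, len(protect))
	for _, p := range protect {
		protectFiles[p] = struct{}{}
	}
	for _, item := range items {
		if _, ok := protectFiles[item]; ok {
			continue
		}
		toClose = append(toClose, item)
	}
	return
}

func main() {
	items := []string{"a.seg", "b.seg", "c.seg"}
	protect := []string{"b.seg"}
	fmt.Println(keepSlow(items, protect)) // [a.seg c.seg]
	fmt.Println(keepFast(items, protect)) // [a.seg c.seg]
}
```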
13 changes: 8 additions & 5 deletions erigon-lib/state/history.go
@@ -76,7 +76,7 @@ type History struct {
compressCfg seg.Cfg
compression seg.FileCompression

- //TODO: re-visit this check - maybe we don't need it. It's abot kill in the middle of merge
+ //TODO: re-visit this check - maybe we don't need it. It's about kill in the middle of merge
integrityCheck func(fromStep, toStep uint64) bool

// not large:
@@ -292,13 +292,16 @@ func (h *History) openDirtyFiles() error {
}

func (h *History) closeWhatNotInList(fNames []string) {
+ protectFiles := make(map[string]struct{}, len(fNames))
+ for _, f := range fNames {
+ protectFiles[f] = struct{}{}
+ }
var toClose []*filesItem
h.dirtyFiles.Walk(func(items []*filesItem) bool {
- Loop1:
for _, item := range items {
- for _, protectName := range fNames {
- if item.decompressor != nil && item.decompressor.FileName() == protectName {
- continue Loop1
+ if item.decompressor != nil {
+ if _, ok := protectFiles[item.decompressor.FileName()]; ok {
+ continue
}
}
toClose = append(toClose, item)
12 changes: 8 additions & 4 deletions erigon-lib/state/inverted_index.go
@@ -348,15 +348,19 @@ func (ii *InvertedIndex) openDirtyFiles() error {
}

func (ii *InvertedIndex) closeWhatNotInList(fNames []string) {
+ protectFiles := make(map[string]struct{}, len(fNames))
+ for _, f := range fNames {
+ protectFiles[f] = struct{}{}
+ }
var toClose []*filesItem
ii.dirtyFiles.Walk(func(items []*filesItem) bool {
- Loop1:
for _, item := range items {
- for _, protectName := range fNames {
- if item.decompressor != nil && item.decompressor.FileName() == protectName {
- continue Loop1
+ if item.decompressor != nil {
+ if _, ok := protectFiles[item.decompressor.FileName()]; ok {
+ continue
}
}

toClose = append(toClose, item)
}
return true
2 changes: 1 addition & 1 deletion eth/stagedsync/stage.go
@@ -87,7 +87,7 @@ func (s *StageState) ExecutionAt(db kv.Getter) (uint64, error) {
}

type UnwindReason struct {
- // If we;re unwinding due to a fork - we want to unlink blocks but not mark
+ // If we're unwinding due to a fork - we want to unlink blocks but not mark
// them as bad - as they may get replayed then deselected
Block *libcommon.Hash
// If unwind is caused by a bad block, this error is not empty
2 changes: 1 addition & 1 deletion p2p/sentry/sentry_multi_client/sentry_multi_client.go
@@ -475,7 +475,7 @@ func (cs *MultiClient) blockBodies66(ctx context.Context, inreq *proto_sentry.In
// No point processing empty response
return nil
}
- cs.Bd.DeliverBodies(txs, uncles, withdrawals, nil, uint64(len(inreq.Data)), sentry.ConvertH512ToPeerID(inreq.PeerId))
+ cs.Bd.DeliverBodies(txs, uncles, withdrawals, uint64(len(inreq.Data)), sentry.ConvertH512ToPeerID(inreq.PeerId))
return nil
}

25 changes: 0 additions & 25 deletions turbo/engineapi/engine_server.go
@@ -737,30 +737,6 @@ func (e *EngineServer) NewPayloadV4(ctx context.Context, payload *engine_types.E
return e.newPayload(ctx, payload, expectedBlobHashes, parentBeaconBlockRoot, executionRequests, clparams.ElectraVersion)
}

- // Receives consensus layer's transition configuration and checks if the execution layer has the correct configuration.
- // Can also be used to ping the execution layer (heartbeats).
- // See https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_exchangetransitionconfigurationv1
- func (e *EngineServer) ExchangeTransitionConfigurationV1(ctx context.Context, beaconConfig *engine_types.TransitionConfiguration) (*engine_types.TransitionConfiguration, error) {
- terminalTotalDifficulty := e.config.TerminalTotalDifficulty
- if e.caplin {
- e.logger.Crit(caplinEnabledLog)
- return nil, errCaplinEnabled
- }
- if terminalTotalDifficulty == nil {
- return nil, fmt.Errorf("the execution layer doesn't have a terminal total difficulty. expected: %v", beaconConfig.TerminalTotalDifficulty)
- }
-
- if terminalTotalDifficulty.Cmp((*big.Int)(beaconConfig.TerminalTotalDifficulty)) != 0 {
- return nil, fmt.Errorf("the execution layer has a wrong terminal total difficulty. expected %v, but instead got: %d", beaconConfig.TerminalTotalDifficulty, terminalTotalDifficulty)
- }
-
- return &engine_types.TransitionConfiguration{
- TerminalTotalDifficulty: (*hexutil.Big)(terminalTotalDifficulty),
- TerminalBlockHash: libcommon.Hash{},
- TerminalBlockNumber: (*hexutil.Big)(libcommon.Big0),
- }, nil
- }

// Returns an array of execution payload bodies referenced by their block hashes
// See https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_getpayloadbodiesbyhashv1
func (e *EngineServer) GetPayloadBodiesByHashV1(ctx context.Context, hashes []libcommon.Hash) ([]*engine_types.ExecutionPayloadBody, error) {
@@ -797,7 +773,6 @@ var ourCapabilities = []string{
"engine_getPayloadV2",
"engine_getPayloadV3",
"engine_getPayloadV4",
"engine_exchangeTransitionConfigurationV1",
"engine_getPayloadBodiesByHashV1",
"engine_getPayloadBodiesByHashV2",
"engine_getPayloadBodiesByRangeV1",
1 change: 0 additions & 1 deletion turbo/engineapi/interface.go
@@ -38,7 +38,6 @@ type EngineAPI interface {
GetPayloadV2(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error)
GetPayloadV3(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error)
GetPayloadV4(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error)
- ExchangeTransitionConfigurationV1(ctx context.Context, transitionConfiguration *engine_types.TransitionConfiguration) (*engine_types.TransitionConfiguration, error)
GetPayloadBodiesByHashV1(ctx context.Context, hashes []common.Hash) ([]*engine_types.ExecutionPayloadBody, error)
GetPayloadBodiesByHashV2(ctx context.Context, hashes []common.Hash) ([]*engine_types.ExecutionPayloadBody, error)
GetPayloadBodiesByRangeV1(ctx context.Context, start, count hexutil.Uint64) ([]*engine_types.ExecutionPayloadBody, error)
2 changes: 1 addition & 1 deletion turbo/jsonrpc/receipts/bor_receipts_generator.go
@@ -49,7 +49,7 @@ func (g *BorGenerator) GenerateBorReceipt(ctx context.Context, tx kv.Tx, block *
}

txNumsReader := rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, g.blockReader))
- ibs, blockContext, _, _, _, err := transactions.ComputeBlockContext(ctx, g.engine, block.HeaderNoCopy(), chainConfig, g.blockReader, txNumsReader, tx, len(blockReceipts)-1)
+ ibs, blockContext, _, _, _, err := transactions.ComputeBlockContext(ctx, g.engine, block.HeaderNoCopy(), chainConfig, g.blockReader, txNumsReader, tx, len(blockReceipts)) // we want to get the state at the end of the block
if err != nil {
return nil, err
}
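The inline comment explains the intent: under the transaction-index convention assumed here (inferred from that comment), passing index i yields the state before executing transaction i, so len(blockReceipts) yields the state after the whole block, while the old len(blockReceipts)-1 stopped just before the last transaction. A toy illustration of that off-by-one:

```go
package main

import "fmt"

// stateAfter models the assumed convention: txIndex selects the state
// before executing transaction txIndex, i.e. txs[0:txIndex] are applied.
func stateAfter(txs []string, txIndex int) []string {
	return txs[:txIndex]
}

func main() {
	txs := []string{"tx0", "tx1", "tx2"}
	fmt.Println(stateAfter(txs, len(txs)-1)) // [tx0 tx1]: before the last tx
	fmt.Println(stateAfter(txs, len(txs)))   // [tx0 tx1 tx2]: end of block
}
```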
13 changes: 6 additions & 7 deletions turbo/snapshotsync/freezeblocks/block_snapshots.go
@@ -933,23 +933,22 @@ func (s *RoSnapshots) Close() {
}

func (s *RoSnapshots) closeWhatNotInList(l []string) {
+ protectFiles := make(map[string]struct{}, len(l))
+ for _, f := range l {
+ protectFiles[f] = struct{}{}
+ }
toClose := make(map[snaptype.Enum][]*DirtySegment, 0)
s.segments.Scan(func(segtype snaptype.Enum, value *segments) bool {
value.DirtySegments.Walk(func(segs []*DirtySegment) bool {

- Loop1:
for _, seg := range segs {
- for _, fName := range l {
- if fName == seg.FileName() {
- continue Loop1
- }
+ if _, ok := protectFiles[seg.FileName()]; ok {
+ continue
}
if _, ok := toClose[seg.segType.Enum()]; !ok {
toClose[segtype] = make([]*DirtySegment, 0)
}
toClose[segtype] = append(toClose[segtype], seg)
}

return true
})
return true
22 changes: 10 additions & 12 deletions turbo/snapshotsync/freezeblocks/caplin_snapshots.go
@@ -379,18 +379,19 @@ func (s *CaplinSnapshots) OpenFolder() error {
}

func (s *CaplinSnapshots) closeWhatNotInList(l []string) {
+ protectFiles := make(map[string]struct{}, len(l))
+ for _, fName := range l {
+ protectFiles[fName] = struct{}{}
+ }
toClose := make([]*DirtySegment, 0)
s.BeaconBlocks.DirtySegments.Walk(func(segments []*DirtySegment) bool {
- Loop1:
for _, sn := range segments {
if sn.Decompressor == nil {
- continue Loop1
+ continue
}
_, name := filepath.Split(sn.FilePath())
- for _, fName := range l {
- if fName == name {
- continue Loop1
- }
+ if _, ok := protectFiles[name]; ok {
+ continue
}
toClose = append(toClose, sn)
}
@@ -403,16 +404,13 @@

toClose = make([]*DirtySegment, 0)
s.BlobSidecars.DirtySegments.Walk(func(segments []*DirtySegment) bool {
- Loop2:
for _, sn := range segments {
if sn.Decompressor == nil {
- continue Loop2
+ continue
}
_, name := filepath.Split(sn.FilePath())
- for _, fName := range l {
- if fName == name {
- continue Loop2
- }
+ if _, ok := protectFiles[name]; ok {
+ continue
}
toClose = append(toClose, sn)
}
10 changes: 3 additions & 7 deletions turbo/stages/bodydownload/body_algos.go
@@ -164,8 +164,7 @@ func (bd *BodyDownload) RequestMoreBodies(tx kv.RwTx, blockReader services.FullB
}
if request {
if header.UncleHash == types.EmptyUncleHash && header.TxHash == types.EmptyRootHash &&
- (header.WithdrawalsHash == nil || *header.WithdrawalsHash == types.EmptyRootHash) &&
- (header.RequestsHash == nil || *header.RequestsHash == types.EmptyRequestsHash) {
+ (header.WithdrawalsHash == nil || *header.WithdrawalsHash == types.EmptyRootHash) {
// Empty block body
body := &types.RawBody{}
if header.WithdrawalsHash != nil {
@@ -192,14 +191,11 @@
if header.WithdrawalsHash != nil {
copy(bodyHashes[2*length.Hash:], header.WithdrawalsHash.Bytes())
}
- if header.RequestsHash != nil {
- copy(bodyHashes[3*length.Hash:], header.RequestsHash.Bytes())
- }
bd.requestedMap[bodyHashes] = blockNum
blockNums = append(blockNums, blockNum)
hashes = append(hashes, hash)
} else {
- // uncleHash, txHash, withdrawalsHash, and requestsHash are all empty (or block is prefetched), no need to request
+ // uncleHash, txHash, and withdrawalsHash are all empty (or block is prefetched), no need to request
bd.delivered.Add(blockNum)
}
}
@@ -253,7 +249,7 @@

// DeliverBodies takes the block body received from a peer and adds it to the various data structures
func (bd *BodyDownload) DeliverBodies(txs [][][]byte, uncles [][]*types.Header, withdrawals []types.Withdrawals,
- requests []types.FlatRequests, lenOfP2PMsg uint64, peerID [64]byte,
+ lenOfP2PMsg uint64, peerID [64]byte,
) {
bd.deliveryCh <- Delivery{txs: txs, uncles: uncles, withdrawals: withdrawals, lenOfP2PMessage: lenOfP2PMsg, peerID: peerID}

5 changes: 2 additions & 3 deletions turbo/stages/bodydownload/body_data_struct.go
@@ -29,8 +29,8 @@ import (
"github.com/erigontech/erigon/core/types"
)

- // BodyHashes is to be used for the mapping between TxHash, UncleHash, WithdrawalsHash, and RequestRoot to the block header
- type BodyHashes [4 * length.Hash]byte
+ // BodyHashes is to be used for the mapping between TxHash, UncleHash, and WithdrawalsHash to the block header
+ type BodyHashes [3 * length.Hash]byte

const MaxBodiesInRequest = 1024

@@ -39,7 +39,6 @@ type Delivery struct {
txs [][][]byte
uncles [][]*types.Header
withdrawals []types.Withdrawals
- requests []types.FlatRequests
lenOfP2PMessage uint64
}

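With the requests root gone, a requested body is keyed by three 32-byte hashes packed back to back: the withdrawals hash sits at offset 2*length.Hash (per RequestMoreBodies above), while which of the first two slots holds TxHash vs UncleHash is an assumption in this sketch:

```go
package main

import "fmt"

const hashLength = 32 // stand-in for length.Hash

// BodyHashes packs three 32-byte hashes into one comparable array,
// usable directly as a map key (cf. bd.requestedMap in the diff above).
type BodyHashes [3 * hashLength]byte

func makeKey(uncleHash, txHash, withdrawalsHash [hashLength]byte) BodyHashes {
	var k BodyHashes
	copy(k[0:], uncleHash[:]) // slot order of the first two is assumed
	copy(k[hashLength:], txHash[:])
	copy(k[2*hashLength:], withdrawalsHash[:]) // offset matches the diff
	return k
}

func main() {
	var u, t, w [hashLength]byte
	u[0], t[0], w[0] = 1, 2, 3
	requestedMap := map[BodyHashes]uint64{}
	requestedMap[makeKey(u, t, w)] = 12345 // block number, for illustration
	fmt.Println(len(requestedMap))         // 1
}
```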
