Merge pull request #162 from testinprod-io/upstream-v2.57.3
Upstream v2.57.3
ImTei authored Apr 18, 2024
2 parents 07ce50f + a383d0c commit 8906795
Showing 359 changed files with 13,878 additions and 7,915 deletions.
5 changes: 4 additions & 1 deletion .github/workflows/ci.yml
@@ -29,7 +29,10 @@ jobs:
if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }}
strategy:
matrix:
- os: [ ubuntu-22.04, macos-13-xlarge ] # list of os: https://github.com/actions/virtual-environments
+ # list of os: https://github.com/actions/virtual-environments
+ os:
+ - ubuntu-22.04
+ - macos-13
runs-on: ${{ matrix.os }}

steps:
81 changes: 81 additions & 0 deletions .github/workflows/qa-clean-exit-block-downloading.yml
@@ -0,0 +1,81 @@
name: QA - Clean exit (block downloading)

on:
push:
branches:
- 'release/**'
pull_request:
branches:
- devel
- 'release/**'
types:
- ready_for_review

jobs:
long-running-test:
runs-on: self-hosted
env:
ERIGON_REFERENCE_DATA_DIR: /opt/erigon-release/datadir
ERIGON_TESTBED_DATA_DIR: /opt/erigon-testbed/datadir
WORKING_TIME_SECONDS: 600

steps:
- name: Check out repository
uses: actions/checkout@v2

- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.21'

- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.x'

#- name: Install dependencies
# run: |
# sudo apt-get update
# sudo apt-get install -y build-essential make gcc

- name: Restore Erigon Testbed Data Directory
run: |
rsync -av --delete $ERIGON_REFERENCE_DATA_DIR/ $ERIGON_TESTBED_DATA_DIR/
- name: Clean Erigon Build Directory
run: |
make clean
- name: Build Erigon
run: |
make erigon
working-directory: ${{ github.workspace }}

#- name: Download Python Script for Logs Checking
# run: |
# curl -o check_erigon_exit.py 'https://gist.githubusercontent.com/mriccobene/8db4030a745de34d527f136f2caa104f/raw/3c1a860cb87d61075e78ce399e17f0ab157cacc6/check_erigon_exit.py'

- name: Run Erigon, send ctrl-c and check for clean exiting
run: |
# Run Erigon, send ctrl-c and check logs
python3 ${{ github.workspace }}/../../../../erigon-qa/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $WORKING_TIME_SECONDS
# Capture monitoring script exit status
monitoring_exit_status=$?

# Clean up Erigon process if it's still running
if kill -0 $ERIGON_PID 2> /dev/null; then
echo "Terminating Erigon"
kill $ERIGON_PID
wait $ERIGON_PID
else
echo "Erigon has already terminated"
fi

# Check monitoring script exit status
if [ $monitoring_exit_status -eq 0 ]; then
echo "Monitoring completed successfully"
else
echo "Error detected in Erigon logs or monitoring script exited unexpectedly"
exit 1
fi
@@ -1,4 +1,4 @@
- name: QA - Clean exit on Ctrl-C
+ name: QA - Clean exit (snapshot downloading)

on:
push:
@@ -19,14 +19,10 @@ on:

jobs:
long-running-test:
- #if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }}
- #strategy:
- # matrix:
- # os: [ ubuntu-22.04, macos-13-xlarge ]
- #runs-on: ${{ matrix.os }}
runs-on: self-hosted
env:
ERIGON_DATA_DIR: ${{ github.workspace }}/erigon_data
+ WORKING_TIME_SECONDS: 600

steps:
- name: Check out repository
@@ -35,7 +31,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
- go-version: '1.20'
+ go-version: '1.21'

- name: Set up Python
uses: actions/setup-python@v4
@@ -63,7 +59,7 @@ jobs:
- name: Run Erigon, send ctrl-c and check for clean exiting
run: |
# Run Erigon, send ctrl-c and check logs
- python3 ${{ github.workspace }}/../../../../erigon-qa/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR
+ python3 ${{ github.workspace }}/../../../../erigon-qa/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $WORKING_TIME_SECONDS
# Capture monitoring script exit status
monitoring_exit_status=$?
3 changes: 2 additions & 1 deletion .github/workflows/release.yml
@@ -41,7 +41,8 @@ jobs:
id: prepare
run: |
TAG=${GITHUB_REF#refs/tags/}
- echo ::set-output name=tag_name::${TAG}
+ echo "tag_name=${TAG}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2

5 changes: 4 additions & 1 deletion .github/workflows/test-integration.yml
@@ -14,7 +14,10 @@ jobs:
tests:
strategy:
matrix:
- os: [ ubuntu-22.04, macos-13-xlarge ] # list of os: https://github.com/actions/virtual-environments
+ # list of os: https://github.com/actions/virtual-environments
+ os:
+ - ubuntu-22.04
+ - macos-13
runs-on: ${{ matrix.os }}

steps:
1 change: 1 addition & 0 deletions cl/abstract/beacon_state.go
@@ -40,6 +40,7 @@ type BeaconStateExtension interface {
ValidatorIndexByPubkey(key [48]byte) (uint64, bool)
PreviousStateRoot() common.Hash
SetPreviousStateRoot(root common.Hash)
+ GetValidatorActivationChurnLimit() uint64
}

type BeaconStateBasic interface {
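For context, the activation churn limit exposed here corresponds to the Deneb (EIP-7514) cap on how many validators may be activated per epoch. The sketch below is illustrative only: the constants are the mainnet values from the consensus specs, and the function names are hypothetical stand-ins rather than Erigon identifiers.

```go
package main

import "fmt"

// Mainnet constants from the consensus specs (not Erigon identifiers).
const (
	minPerEpochChurnLimit           = 4
	churnLimitQuotient              = 65536
	maxPerEpochActivationChurnLimit = 8 // introduced by EIP-7514 (Deneb)
)

// validatorChurnLimit is the pre-Deneb per-epoch churn limit:
// max(MIN_PER_EPOCH_CHURN_LIMIT, activeValidators / CHURN_LIMIT_QUOTIENT).
func validatorChurnLimit(activeValidators uint64) uint64 {
	limit := activeValidators / churnLimitQuotient
	if limit < minPerEpochChurnLimit {
		return minPerEpochChurnLimit
	}
	return limit
}

// validatorActivationChurnLimit applies the Deneb cap to activations only:
// min(MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT, validatorChurnLimit).
func validatorActivationChurnLimit(activeValidators uint64) uint64 {
	limit := validatorChurnLimit(activeValidators)
	if limit > maxPerEpochActivationChurnLimit {
		return maxPerEpochActivationChurnLimit
	}
	return limit
}

func main() {
	// ~1M active validators: 1_000_000/65_536 = 15, capped at 8 for activations.
	fmt.Println(validatorActivationChurnLimit(1_000_000)) // 8
}
```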
46 changes: 25 additions & 21 deletions cl/antiquary/state_antiquary.go
@@ -150,41 +150,42 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error {
return next(k, k, v)
}

- effectiveBalance := etl.NewCollector(kv.ValidatorEffectiveBalance, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ etlBufSz := etl.BufferOptimalSize / 8 // 18 collectors * 256mb / 8 = 512mb in worst case
+ effectiveBalance := etl.NewCollector(kv.ValidatorEffectiveBalance, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer effectiveBalance.Close()
- balances := etl.NewCollector(kv.ValidatorBalance, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ balances := etl.NewCollector(kv.ValidatorBalance, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer balances.Close()
- randaoMixes := etl.NewCollector(kv.RandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ randaoMixes := etl.NewCollector(kv.RandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer randaoMixes.Close()
- intraRandaoMixes := etl.NewCollector(kv.IntraRandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ intraRandaoMixes := etl.NewCollector(kv.IntraRandaoMixes, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer intraRandaoMixes.Close()
- proposers := etl.NewCollector(kv.Proposers, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ proposers := etl.NewCollector(kv.Proposers, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer proposers.Close()
- slashings := etl.NewCollector(kv.ValidatorSlashings, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ slashings := etl.NewCollector(kv.ValidatorSlashings, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer slashings.Close()
- blockRoots := etl.NewCollector(kv.BlockRoot, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ blockRoots := etl.NewCollector(kv.BlockRoot, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer blockRoots.Close()
- stateRoots := etl.NewCollector(kv.StateRoot, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ stateRoots := etl.NewCollector(kv.StateRoot, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer stateRoots.Close()
- slotData := etl.NewCollector(kv.SlotData, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ slotData := etl.NewCollector(kv.SlotData, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer slotData.Close()
- epochData := etl.NewCollector(kv.EpochData, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ epochData := etl.NewCollector(kv.EpochData, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer epochData.Close()
- inactivityScoresC := etl.NewCollector(kv.InactivityScores, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ inactivityScoresC := etl.NewCollector(kv.InactivityScores, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer inactivityScoresC.Close()
- nextSyncCommittee := etl.NewCollector(kv.NextSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ nextSyncCommittee := etl.NewCollector(kv.NextSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer nextSyncCommittee.Close()
- currentSyncCommittee := etl.NewCollector(kv.CurrentSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ currentSyncCommittee := etl.NewCollector(kv.CurrentSyncCommittee, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer currentSyncCommittee.Close()
- currentEpochAttestations := etl.NewCollector(kv.CurrentEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ currentEpochAttestations := etl.NewCollector(kv.CurrentEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer currentEpochAttestations.Close()
- previousEpochAttestations := etl.NewCollector(kv.PreviousEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ previousEpochAttestations := etl.NewCollector(kv.PreviousEpochAttestations, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer previousEpochAttestations.Close()
- eth1DataVotes := etl.NewCollector(kv.Eth1DataVotes, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ eth1DataVotes := etl.NewCollector(kv.Eth1DataVotes, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer eth1DataVotes.Close()
- stateEvents := etl.NewCollector(kv.StateEvents, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ stateEvents := etl.NewCollector(kv.StateEvents, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer stateEvents.Close()
- activeValidatorIndicies := etl.NewCollector(kv.ActiveValidatorIndicies, s.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize), s.logger)
+ activeValidatorIndicies := etl.NewCollector(kv.ActiveValidatorIndicies, s.dirs.Tmp, etl.NewSortableBuffer(etlBufSz), s.logger)
defer activeValidatorIndicies.Close()

progress, err := state_accessors.GetStateProcessingProgress(tx)
@@ -360,9 +361,11 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error {
defer progressTimer.Stop()
prevSlot := slot
first := false
+ blocksBeforeCommit := 100_000
+ blocksProcessed := 0
// This tells us that transition and operations do not happen concurrently and access is safe, so we can optimize for GC.
// there is optimized custom cache to recycle big GC overhead.
- for ; slot < to; slot++ {
+ for ; slot < to && blocksProcessed < blocksBeforeCommit; slot++ {
slashingOccured = false // Set this to false at the beginning of each slot.
key := base_encoding.Encode64ToBytes4(slot)

@@ -418,12 +421,13 @@ func (s *Antiquary) IncrementBeaconState(ctx context.Context, to uint64) error {
prevValSet = prevValSet[:0]
prevValSet = append(prevValSet, s.currentState.RawValidatorSet()...)

- fullValidation := slot%100_000 == 0 || first
+ fullValidation := slot%1000 == 0 || first
blockRewardsCollector := &eth2.BlockRewardsCollector{}
- // We sanity check the state every 100k slots or when we start.
+ // We sanity check the state every 1k slots or when we start.
if err := transition.TransitionState(s.currentState, block, blockRewardsCollector, fullValidation); err != nil {
return err
}
+ blocksProcessed++

first = false

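The loop change above caps each IncrementBeaconState pass at 100,000 processed blocks and relaxes full validation to every 1,000 slots, letting the caller commit between batches. A minimal, self-contained sketch of that batch-and-return pattern, with hypothetical names (processSlot, incrementState) standing in for Erigon's actual transition and commit logic:

```go
package main

import "fmt"

const (
	blocksBeforeCommit   = 100_000 // stop the batch after this many processed blocks
	fullValidationPeriod = 1_000   // sanity-check the state every 1k slots (or on the first slot)
)

// processSlot stands in for the real state transition; it reports whether a block existed at the slot.
func processSlot(slot uint64, fullValidation bool) bool {
	_ = fullValidation        // the dummy ignores the flag
	return slot%2 == 0        // dummy: pretend every other slot has a block
}

// incrementState advances from `from` toward `to`, stopping early once a batch is full
// so the caller can commit and re-enter with a fresh transaction.
func incrementState(from, to uint64) (next uint64) {
	blocksProcessed := 0
	first := true
	slot := from
	for ; slot < to && blocksProcessed < blocksBeforeCommit; slot++ {
		fullValidation := slot%fullValidationPeriod == 0 || first
		if processSlot(slot, fullValidation) {
			blocksProcessed++
		}
		first = false
	}
	return slot // caller commits here, then calls incrementState(next, to) again
}

func main() {
	// With the dummy processSlot, the batch fills after roughly 200k slots.
	fmt.Println(incrementState(0, 250_000))
}
```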
1 change: 0 additions & 1 deletion cl/beacon/beaconhttp/api.go
@@ -73,7 +73,6 @@ func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc {
ans, err := h.Handle(w, r)
log.Debug("beacon api request", "endpoint", r.URL.Path, "duration", time.Since(start))
if err != nil {
- log.Error("beacon api request error", "err", err)
var endpointError *EndpointError
if e, ok := err.(*EndpointError); ok {
endpointError = e