diff --git a/.github/workflows/ci-cd-main-branch-docker-images.yml b/.github/workflows/ci-cd-main-branch-docker-images.yml
index bdfc1e6a883..b476d0db4c9 100644
--- a/.github/workflows/ci-cd-main-branch-docker-images.yml
+++ b/.github/workflows/ci-cd-main-branch-docker-images.yml
@@ -9,6 +9,7 @@ env:
CHECKOUT_REF: "main"
DOCKERHUB_REPOSITORY: "erigontech/erigon"
LABEL_DESCRIPTION: "[docker image built on a last commit id from the main branch] Erigon is an implementation of Ethereum (execution layer with embeddable consensus layer), on the efficiency frontier. Archive Node by default."
+ KEEP_IMAGES: 100
on:
push:
@@ -127,7 +128,7 @@ jobs:
--push \
--platform linux/amd64,linux/arm64 .
- - name: export and print docker build tag
+ - name: export and print docker build tag, clean up old docker images
id: built_tag_export
env:
BUILD_VERSION: "main-${{ steps.getCommitId.outputs.short_commit_id }}"
@@ -136,6 +137,36 @@ jobs:
echo The following docker images have been published:
echo "${{ env.DOCKERHUB_REPOSITORY }}:main-${{ env.BUILD_VERSION }}"
echo "${{ env.DOCKERHUB_REPOSITORY }}:main-latest"
+ echo
+ echo "Cleanup old docker images matching pattern tag ~= main-XXXXXXX"
+ curl_cmd="curl -s -H \"Authorization: JWT ${{ secrets.ORG_DOCKERHUB_ERIGONTECH_TOKEN }}\" "
+ dockerhub_url='https://hub.docker.com/v2/namespaces/erigontech/repositories/erigon'
+ my_list () {
+ # First page:
+ next_page="$dockerhub_url/tags?page=1&page_size=100"
+ while [ "$next_page" != "null" ]
+ do
+ # Print tags and push dates for tags matching "main-":
+ $curl_cmd $next_page | jq -r '.results|.[]|.name + " " + .tag_last_pushed' | grep 'main-'
+ next_page=`$curl_cmd $next_page | jq '.next' | sed -e 's/^\"//' -e 's/\"$//'`
+ done
+ }
+
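+ # DockerHub lists tags newest-first by default, so everything from line KEEP_IMAGES onward is old enough to delete.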
+ my_list | tail -n+${{ env.KEEP_IMAGES }} | while read line; do
+ echo -n "Removing docker image/published - $line "
+ current_image=$(echo $line | sed -e 's/^\(main-.\{7\}\) .*/\1/')
+ output_code=$(curl --write-out %{http_code} --output curl-output.log \
+ -s -X DELETE -H "Accept: application/json" \
+ -H "Authorization: JWT ${{ secrets.ORG_DOCKERHUB_ERIGONTECH_TOKEN }}" \
+ https://hub.docker.com/v2/repositories/erigontech/erigon/tags/${current_image} )
+ if [ $output_code -ne 204 ]; then
+ echo "ERROR: failed to remove docker image erigon:${current_image}"
+ echo "ERROR: API response: $(cat curl-output.log)."
+ else
+ echo -n " - removed. "
+ fi
+ echo "Done."
+ done
run-kurtosis-assertoor:
needs: [define_matrix, Build]
@@ -143,4 +174,4 @@ jobs:
with:
checkout_ref: ${{ github.sha }}
os: ${{ needs.define_matrix.outputs.os }}
- docker_build_tag: ${{ needs.Build.outputs.docker_build_tag }}
\ No newline at end of file
+ docker_build_tag: ${{ needs.Build.outputs.docker_build_tag }}
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 86502cf83d8..31cb5b4a28d 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -26,13 +26,13 @@ jobs:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
- go-version: '1.22'
+ go-version: '1.23'
- name: Install golangci-lint
if: runner.os == 'Linux'
uses: golangci/golangci-lint-action@v6
with:
- version: v1.59.1
+ version: v1.61.0
args: --help
- name: Lint
diff --git a/.github/workflows/qa-constrained-tip-tracking.yml b/.github/workflows/qa-constrained-tip-tracking.yml
index d461a492bfb..658a048343b 100644
--- a/.github/workflows/qa-constrained-tip-tracking.yml
+++ b/.github/workflows/qa-constrained-tip-tracking.yml
@@ -2,15 +2,8 @@ name: QA - Constrained Tip tracking
on:
schedule:
- - cron: '0 0 * * 0' # Run on Sunday at 00:00 AM UTC
+ - cron: '0 20 * * 0' # Run on Sunday at 08:00 PM UTC
workflow_dispatch: # Run manually
- pull_request:
- branches:
- - qa_tests_constrained_tip_tracking
- types:
- - opened
- - synchronize
- - ready_for_review
jobs:
constrained-tip-tracking-test:
diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml
index bbdd869281b..74cf9e0aca1 100644
--- a/.github/workflows/qa-rpc-integration-tests.yml
+++ b/.github/workflows/qa-rpc-integration-tests.yml
@@ -17,6 +17,7 @@ on:
jobs:
integration-test-suite:
runs-on: [ self-hosted, Erigon3 ]
+ timeout-minutes: 15
env:
ERIGON_REFERENCE_DATA_DIR: /opt/erigon-versions/reference-version/datadir
ERIGON_TESTBED_DATA_DIR: /opt/erigon-testbed/datadir
@@ -83,97 +84,10 @@ jobs:
cd ${{ runner.workspace }}/rpc-tests/integration
rm -rf ./mainnet/results/
-
+
# Run RPC integration test runner via http
- python3 ./run_tests.py -p 8545 --continue -f --json-diff -x \
- debug_accountRange,debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber,debug_storageRangeAt,debug_traceBlockByHash,\
- debug_traceCallMany/test_02.tar,debug_traceCallMany/test_04.tar,debug_traceCallMany/test_05.tar,debug_traceCallMany/test_06.tar,debug_traceCallMany/test_07.tar,debug_traceCallMany/test_09.json,debug_traceCallMany/test_10.tar,\
- debug_traceBlockByNumber/test_05.tar,debug_traceBlockByNumber/test_08.tar,debug_traceBlockByNumber/test_09.tar,debug_traceBlockByNumber/test_10.tar,debug_traceBlockByNumber/test_11.tar,debug_traceBlockByNumber/test_12.tar,\
- debug_traceTransaction,\
- engine_exchangeCapabilities/test_1.json,\
- engine_exchangeTransitionConfigurationV1/test_01.json,\
- engine_getClientVersionV1/test_1.json,\
- erigon_getLogsByHash/test_04.json,\
- erigon_getHeaderByHash/test_02.json,\
- erigon_getHeaderByHash/test_03.json,\
- erigon_getHeaderByHash/test_04.json,\
- erigon_getHeaderByHash/test_06.json,\
- erigon_getHeaderByNumber/test_01.json,\
- erigon_getHeaderByNumber/test_02.json,\
- erigon_getHeaderByNumber/test_03.json,\
- erigon_getHeaderByNumber/test_04.json,\
- erigon_getHeaderByNumber/test_05.json,\
- erigon_getHeaderByNumber/test_06.json,\
- erigon_getHeaderByNumber/test_07.json,\
- erigon_getHeaderByNumber/test_08.json,\
- erigon_getLatestLogs/test_01.json,\
- erigon_getLatestLogs/test_02.json,\
- erigon_getLatestLogs/test_03.json,\
- erigon_getLatestLogs/test_04.json,\
- erigon_getLatestLogs/test_05.json,\
- erigon_getLatestLogs/test_06.json,\
- erigon_getLatestLogs/test_08.json,\
- erigon_getLatestLogs/test_09.json,\
- erigon_getLatestLogs/test_10.json,\
- erigon_getLatestLogs/test_11.json,\
- erigon_getLatestLogs/test_12.json,\
- erigon_getBalanceChangesInBlock,\
- eth_createAccessList/test_16.json,\
- parity_getBlockReceipts/test_01.json,\
- parity_getBlockReceipts/test_02.json,\
- parity_getBlockReceipts/test_03.json,\
- parity_getBlockReceipts/test_04.json,\
- parity_getBlockReceipts/test_05.json,\
- parity_getBlockReceipts/test_06.json,\
- parity_getBlockReceipts/test_07.json,\
- parity_getBlockReceipts/test_08.json,\
- parity_getBlockReceipts/test_09.json,\
- parity_getBlockReceipts/test_10.json,\
- trace_filter/test_16.json,\
- trace_rawTransaction/test_01.json,\
- trace_rawTransaction/test_03.json,\
- admin_nodeInfo/test_01.json,\
- admin_peers/test_01.json,\
- erigon_nodeInfo/test_1.json,\
- eth_coinbase/test_01.json,\
- eth_feeHistory/test_01.json,\
- eth_feeHistory/test_02.json,\
- eth_feeHistory/test_03.json,\
- eth_feeHistory/test_04.json,\
- eth_feeHistory/test_05.json,\
- eth_feeHistory/test_06.json,\
- eth_feeHistory/test_08.json,\
- eth_feeHistory/test_09.json,\
- eth_feeHistory/test_10.json,\
- eth_feeHistory/test_11.json,\
- eth_getBlockByHash/test_01.json,\
- eth_getBlockByHash/test_02.json,\
- eth_getBlockByHash/test_05.json,\
- eth_getBlockByHash/test_06.json,\
- eth_getBlockByHash/test_07.json,\
- eth_getBlockByHash/test_08.json,\
- eth_getBlockByNumber/test_01.json,\
- eth_getBlockByNumber/test_02.json,\
- eth_getBlockByNumber/test_04.json,\
- eth_getBlockByNumber/test_05.json,\
- eth_getBlockByNumber/test_06.json,\
- eth_getBlockByNumber/test_07.json,\
- eth_getBlockByNumber/test_08.json,\
- eth_getBlockByNumber/test_12.json,\
- eth_getBlockByNumber/test_13.json,\
- eth_getTransactionByHash/test_02.json,\
- eth_getWork/test_01.json,\
- eth_mining/test_01.json,\
- eth_protocolVersion/test_1.json,\
- eth_submitHashrate/test_1.json,\
- eth_submitWork/test_1.json,\
- net_peerCount/test_1.json,\
- net_version/test_1.json,\
- txpool_content/test_01.json,\
- txpool_status/test_1.json,\
- web3_clientVersion/test_1.json,\
- eth_estimateGas/test_14.json,\
- trace_replayBlockTransactions/test_29.tar
+ chmod +x ${{ runner.workspace }}/erigon/.github/workflows/scripts/run_rpc_tests.sh
+ ${{ runner.workspace }}/erigon/.github/workflows/scripts/run_rpc_tests.sh
# Capture test runner script exit status
test_exit_status=$?
diff --git a/.github/workflows/qa-snap-download.yml b/.github/workflows/qa-snap-download.yml
index e2fdc0f138a..fa0942406b8 100644
--- a/.github/workflows/qa-snap-download.yml
+++ b/.github/workflows/qa-snap-download.yml
@@ -2,7 +2,7 @@ name: QA - Snapshot Download
on:
schedule:
- - cron: '0 22 * * 1-6' # Run every night at 22:00 (10:00 PM) UTC except Sunday
+ - cron: '0 20 * * 1-6' # Run every night at 20:00 (08:00 PM) UTC except Sunday
workflow_dispatch: # Run manually
jobs:
diff --git a/.github/workflows/qa-tip-tracking.yml b/.github/workflows/qa-tip-tracking.yml
index 77f5fe9b4b5..aba0c4a5026 100644
--- a/.github/workflows/qa-tip-tracking.yml
+++ b/.github/workflows/qa-tip-tracking.yml
@@ -2,7 +2,7 @@ name: QA - Tip tracking
on:
schedule:
- - cron: '0 0 * * 1-6' # Run every night at 00:00 AM UTC except Sunday
+ - cron: '0 20 * * 1-6' # Run every night at 08:00 PM UTC except Sunday
workflow_dispatch: # Run manually
jobs:
diff --git a/.github/workflows/scripts/run_rpc_tests.sh b/.github/workflows/scripts/run_rpc_tests.sh
new file mode 100644
index 00000000000..0f94f68c039
--- /dev/null
+++ b/.github/workflows/scripts/run_rpc_tests.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+set +e # Disable exit on error
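+# NOTE: run from the rpc-tests/integration directory (the CI workflow cd's there before invoking this script).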
+
+# Array of disabled tests
+disabled_tests=(
+ # Erigon2 and Erigon3 never supported these API methods
+ trace_rawTransaction
+ # false positives: Erigon returns the expected response, but rpc-test somehow doesn't see one field.
+ erigon_getHeaderByHash,erigon_getHeaderByNumber,eth_feeHistory
+ # total difficulty field was removed, then added back
+ eth_getBlockByHash,eth_getBlockByNumber
+ # Erigon bugs
+ debug_accountRange,debug_storageRangeAt
+ # need to update rpc-test - because Erigon is correct (@AskAlexSharov will do it after https://github.com/erigontech/erigon/pull/12634)
+ debug_getModifiedAccountsByHash,debug_getModifiedAccountsByNumber
+ # Erigon bug https://github.com/erigontech/erigon/issues/12603
+ erigon_getLatestLogs,erigon_getLogsByHash/test_04.json
+ # Erigon bug https://github.com/erigontech/erigon/issues/12637
+ debug_traceBlockByNumber/test_05.tar
+ debug_traceBlockByNumber/test_08.tar
+ debug_traceBlockByNumber/test_09.tar
+ debug_traceBlockByNumber/test_10.tar
+ debug_traceBlockByNumber/test_11.tar
+ debug_traceBlockByNumber/test_12.tar
+ # remove this line after https://github.com/erigontech/rpc-tests/pull/281
+ parity_getBlockReceipts
+ # to investigate
+ debug_traceBlockByHash
+ debug_traceCallMany/test_02.tar
+ debug_traceCallMany/test_04.tar
+ debug_traceCallMany/test_05.tar
+ debug_traceCallMany/test_06.tar
+ debug_traceCallMany/test_07.tar
+ debug_traceCallMany/test_09.json
+ debug_traceCallMany/test_10.tar
+ engine_exchangeCapabilities/test_1.json
+ engine_exchangeTransitionConfigurationV1/test_01.json
+ engine_getClientVersionV1/test_1.json
+ erigon_getBalanceChangesInBlock
+ eth_createAccessList/test_16.json
+ admin_nodeInfo/test_01.json
+ admin_peers/test_01.json
+ erigon_nodeInfo/test_1.json
+ eth_coinbase/test_01.json
+ eth_getTransactionByHash/test_02.json
+ eth_getWork/test_01.json
+ eth_mining/test_01.json
+ eth_protocolVersion/test_1.json
+ eth_submitHashrate/test_1.json
+ eth_submitWork/test_1.json
+ net_peerCount/test_1.json
+ net_version/test_1.json
+ txpool_content/test_01.json
+ txpool_status/test_1.json
+ web3_clientVersion/test_1.json
+ eth_estimateGas/test_14.json
+ trace_replayBlockTransactions/test_29.tar
+ # recently started to fail
+ debug_traceTransaction/test_20.json
+ debug_traceTransaction/test_21.json
+ debug_traceTransaction/test_22.json
+ debug_traceTransaction/test_25.json
+ debug_traceTransaction/test_30.tar
+ debug_traceTransaction/test_33.json
+ debug_traceTransaction/test_35.tar
+ debug_traceTransaction/test_36.json
+ debug_traceTransaction/test_37.tar
+ debug_traceTransaction/test_38.tar
+ debug_traceTransaction/test_43.json
+ debug_traceTransaction/test_44.json
+ debug_traceTransaction/test_62.json
+ debug_traceTransaction/test_64.json
+ debug_traceTransaction/test_74.tar
+ debug_traceTransaction/test_75.tar
+ debug_traceTransaction/test_77.json
+ debug_traceTransaction/test_78.tar
+ debug_traceTransaction/test_79.tar
+ debug_traceTransaction/test_80.tar
+ debug_traceTransaction/test_81.tar
+ debug_traceTransaction/test_82.tar
+ debug_traceTransaction/test_83.tar
+ debug_traceTransaction/test_84.tar
+ debug_traceTransaction/test_85.tar
+ debug_traceTransaction/test_87.json
+ debug_traceTransaction/test_90.tar
+ debug_traceTransaction/test_91.tar
+ debug_traceTransaction/test_92.tar
+ debug_traceTransaction/test_93.json
+ debug_traceTransaction/test_96.json
+ trace_filter/test_16.json)
+
+# Transform the array into a comma-separated string
+disabled_test_list=$(IFS=,; echo "${disabled_tests[*]}")
+
+python3 ./run_tests.py -p 8545 --continue -f --json-diff -x "$disabled_test_list"
+
+exit $?
\ No newline at end of file
diff --git a/README.md b/README.md
index a863e843bb4..64f9ed319c5 100644
--- a/README.md
+++ b/README.md
@@ -203,18 +203,20 @@ du -hsc /erigon/snapshots/*
### Erigon3 changes from Erigon2
-- Initial sync does download LatestState and it's history - no re-exec from 0 anymore.
-- ExecutionStage included many E2 stages: stage_hash_state, stage_trie, log_index, history_index, trace_index
-- E3 can execute 1 historical transaction - without executing it's block - because history/indices have
- transaction-granularity, instead of block-granularity.
-- E3 doesn't store Logs (aka Receipts) - it always re-executing historical txn (but it's cheaper then in E2 - see point
- above).
-- Restart doesn't loose much partial progress: `--sync.loop.block.limit=5_000` enabled by default
-- `chaindata` is less than `15gb`. It's ok to `rm -rf chaindata`. To prevent it's grow: recommend `--batchSize <= 1G`
-- can symlink/mount latest state to fast drive and history to cheap drive
-- `--internalcl` is enabled by default. to disable use `--externalcl`
-- `--prune` flags changed: default `--prune.mode=archive`, FullNode: `--prune.mode=full`, MinimalNode (EIP-4444):
- `--prune.mode=minimal`.
+- **Initial sync doesn't re-exec from 0:** it downloads LatestState and its History (99% of the data)
+- **Per-transaction granularity of history** (Erigon2 had per-block granularity). This means:
+ - Can execute 1 historical transaction - without executing its whole block
+ - If account X changes V1->V2->V1 within 1 block (in different transactions), `debug_getModifiedAccountsByNumber` returns
+ it
+ - Erigon3 doesn't store Logs (aka Receipts) - it always re-executes the historical txn (but it's cheaper than in E2)
+- **Validator mode**: added. `--internalcl` is enabled by default. To disable, use `--externalcl`.
+- **Store most of the data in immutable files (segments/snapshots):**
+ - can symlink/mount latest state to a fast drive and history to a cheap drive
+ - `chaindata` is less than `15gb`. It's ok to `rm -rf chaindata`. (to prevent growth: `--batchSize <= 1G` is recommended)
+- **`--prune` flags changed**: see `--prune.mode` (default: `archive`, FullNode: `full`, EIP-4444: `minimal`); see the sketch below
+- **Other changes:**
+ - ExecutionStage now includes many E2 stages: stage_hash_state, stage_trie, log_index, history_index, trace_index
+ - Restart doesn't lose much partial progress: `--sync.loop.block.limit=5_000` is enabled by default
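+
+A quick sketch of the prune modes (the binary and datadir paths are illustrative):
+
+```sh
+# archive node (default)
+./build/bin/erigon --datadir=/data/erigon --prune.mode=archive
+# full node
+./build/bin/erigon --datadir=/data/erigon --prune.mode=full
+# minimal node (EIP-4444)
+./build/bin/erigon --datadir=/data/erigon --prune.mode=minimal
+```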
### Logging
diff --git a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage.go b/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage.go
deleted file mode 100644
index 330c758e014..00000000000
--- a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
-
-package diffstorage
-
-import (
- "bytes"
- "io"
- "sync"
-
- "github.com/alecthomas/atomic"
- libcommon "github.com/erigontech/erigon-lib/common"
-)
-
-const maxDumps = 8 // max number of dumps to keep in memory to prevent from memory leak during long non-finality.
-
-var bufferPool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
-}
-
-type link struct {
- from libcommon.Hash
- to libcommon.Hash
-}
-
-// Memory storage for binary diffs
-type ChainDiffStorage struct {
- dumps sync.Map
- parent sync.Map // maps child -> parent
- links sync.Map // maps root -> []links
- diffFn func(w io.Writer, old, new []byte) error
- applyFn func(in, out []byte, diff []byte, reverse bool) ([]byte, error)
- diffs sync.Map
- dumpsCount atomic.Int32 // prevent from memory leak during long non-finality.
-}
-
-func NewChainDiffStorage(diffFn func(w io.Writer, old, new []byte) error, applyFn func(in, out []byte, diff []byte, reverse bool) ([]byte, error)) *ChainDiffStorage {
- return &ChainDiffStorage{
- diffFn: diffFn,
- applyFn: applyFn,
- dumpsCount: atomic.NewInt32(0),
- }
-}
-
-func (c *ChainDiffStorage) Insert(root, parent libcommon.Hash, prevDump, dump []byte, isDump bool) error {
- c.parent.Store(root, parent)
- if isDump {
- c.dumpsCount.Add(1)
- if c.dumpsCount.Load() > maxDumps {
- *c = *NewChainDiffStorage(c.diffFn, c.applyFn)
- c.dumpsCount.Store(0)
- return nil
- }
- c.dumps.Store(root, libcommon.Copy(dump))
- return nil
- }
-
- buf := bufferPool.Get().(*bytes.Buffer)
- defer bufferPool.Put(buf)
- buf.Reset()
-
- if err := c.diffFn(buf, prevDump, dump); err != nil {
- return err
- }
- c.diffs.Store(link{from: parent, to: root}, libcommon.Copy(buf.Bytes()))
-
- links, _ := c.links.LoadOrStore(parent, []link{})
- c.links.Store(parent, append(links.([]link), link{from: parent, to: root}))
-
- return nil
-}
-
-func (c *ChainDiffStorage) Get(root libcommon.Hash) ([]byte, error) {
- dump, foundDump := c.dumps.Load(root)
- if foundDump {
- return dump.([]byte), nil
- }
- currentRoot := root
- diffs := [][]byte{}
- for !foundDump {
- parent, found := c.parent.Load(currentRoot)
- if !found {
- return nil, nil
- }
- diff, foundDiff := c.diffs.Load(link{from: parent.(libcommon.Hash), to: currentRoot})
- if !foundDiff {
- return nil, nil
- }
- diffs = append(diffs, diff.([]byte))
- currentRoot = parent.(libcommon.Hash)
- dump, foundDump = c.dumps.Load(currentRoot)
- }
- out := libcommon.Copy(dump.([]byte))
- for i := len(diffs) - 1; i >= 0; i-- {
- var err error
- out, err = c.applyFn(out, out, diffs[i], false)
- if err != nil {
- return nil, err
- }
- }
- return out, nil
-}
-
-func (c *ChainDiffStorage) Delete(root libcommon.Hash) {
- if _, loaded := c.dumps.LoadAndDelete(root); loaded {
- c.dumpsCount.Add(-1)
- }
- c.parent.Delete(root)
- links, ok := c.links.Load(root)
- if ok {
- for _, link := range links.([]link) {
- c.diffs.Delete(link)
- }
- }
- c.links.Delete(root)
-}
diff --git a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage_test.go b/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage_test.go
deleted file mode 100644
index e4a6835dcd0..00000000000
--- a/cl/phase1/forkchoice/fork_graph/diff_storage/diff_storage_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2024 The Erigon Authors
-// This file is part of Erigon.
-//
-// Erigon is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Erigon is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
-
-package diffstorage
-
-import (
- "math"
- "testing"
-
- libcommon "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon/cl/cltypes/solid"
- "github.com/erigontech/erigon/cl/persistence/base_encoding"
- "github.com/stretchr/testify/require"
-)
-
-// 1 -> 2 -> 3 -> 4 -> 5
-//
-// |
-// --> 6
-func TestDiffStorage(t *testing.T) {
- // decleare 5 nodes
- node1 := libcommon.Hash{1}
- node2 := libcommon.Hash{2}
- node3 := libcommon.Hash{3}
- node4 := libcommon.Hash{4}
- node5 := libcommon.Hash{5}
- node6 := libcommon.Hash{6}
-
- node1Content := []uint64{1, 2, 3, 4, 5}
- node2Content := []uint64{1, 2, 3, 4, 5, 6}
- node3Content := []uint64{1, 2, 3, 4, 5, 2, 7}
- node4Content := []uint64{1, 2, 3, 4, 5, 2, 7, 8}
- node5Content := []uint64{1, 6, 8, 4, 5, 2, 7, 8, 9}
- node6Content := []uint64{1, 2, 3, 4, 5, 2, 7, 10}
-
- exp1 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node1Content)
- exp2 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node2Content)
- exp3 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node3Content)
- exp4 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node4Content)
- exp5 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node5Content)
- exp6 := solid.NewUint64ListSSZFromSlice(math.MaxInt, node6Content)
-
- enc1, err := exp1.EncodeSSZ(nil)
- require.NoError(t, err)
- enc2, err := exp2.EncodeSSZ(nil)
- require.NoError(t, err)
- enc3, err := exp3.EncodeSSZ(nil)
- require.NoError(t, err)
- enc4, err := exp4.EncodeSSZ(nil)
- require.NoError(t, err)
- enc5, err := exp5.EncodeSSZ(nil)
- require.NoError(t, err)
- enc6, err := exp6.EncodeSSZ(nil)
- require.NoError(t, err)
-
- diffStorage := NewChainDiffStorage(base_encoding.ComputeCompressedSerializedUint64ListDiff, base_encoding.ApplyCompressedSerializedUint64ListDiff)
- diffStorage.Insert(node1, libcommon.Hash{}, nil, enc1, true)
- diffStorage.Insert(node2, node1, enc1, enc2, false)
- diffStorage.Insert(node3, node2, enc2, enc3, false)
- diffStorage.Insert(node4, node3, enc3, enc4, false)
- diffStorage.Insert(node5, node4, enc4, enc5, false)
- diffStorage.Insert(node6, node2, enc2, enc6, false)
-
- d1, err := diffStorage.Get(node1)
- require.NoError(t, err)
- require.Equal(t, enc1, d1)
-
- d2, err := diffStorage.Get(node2)
- require.NoError(t, err)
- require.Equal(t, enc2, d2)
-
- d3, err := diffStorage.Get(node3)
- require.NoError(t, err)
- require.Equal(t, enc3, d3)
-
- d4, err := diffStorage.Get(node4)
- require.NoError(t, err)
- require.Equal(t, enc4, d4)
-
- d5, err := diffStorage.Get(node5)
- require.NoError(t, err)
- require.Equal(t, enc5, d5)
-
- d6, err := diffStorage.Get(node6)
- require.NoError(t, err)
- require.Equal(t, enc6, d6)
-}
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
index bcef69833ed..3068031bebb 100644
--- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
@@ -23,7 +23,6 @@ import (
"sync"
"sync/atomic"
- "github.com/klauspost/compress/zstd"
"github.com/spf13/afero"
libcommon "github.com/erigontech/erigon-lib/common"
@@ -34,9 +33,7 @@ import (
"github.com/erigontech/erigon/cl/cltypes"
"github.com/erigontech/erigon/cl/cltypes/lightclient_utils"
"github.com/erigontech/erigon/cl/cltypes/solid"
- "github.com/erigontech/erigon/cl/persistence/base_encoding"
"github.com/erigontech/erigon/cl/phase1/core/state"
- diffstorage "github.com/erigontech/erigon/cl/phase1/forkchoice/fork_graph/diff_storage"
"github.com/erigontech/erigon/cl/transition"
"github.com/erigontech/erigon/cl/transition/impl/eth2"
)
@@ -48,26 +45,6 @@ type syncCommittees struct {
nextSyncCommittee *solid.SyncCommittee
}
-var compressorPool = sync.Pool{
- New: func() interface{} {
- w, err := zstd.NewWriter(nil)
- if err != nil {
- panic(err)
- }
- return w
- },
-}
-
-var decompressPool = sync.Pool{
- New: func() interface{} {
- r, err := zstd.NewReader(nil)
- if err != nil {
- panic(err)
- }
- return r
- },
-}
-
var ErrStateNotFound = errors.New("state not found")
type ChainSegmentInsertionResult uint
@@ -132,12 +109,9 @@ type forkGraphDisk struct {
// for each block root we keep track of the sync committees for head retrieval.
syncCommittees sync.Map
lightclientBootstraps sync.Map
- // diffs storage
- balancesStorage *diffstorage.ChainDiffStorage
- validatorSetStorage *diffstorage.ChainDiffStorage
- inactivityScoresStorage *diffstorage.ChainDiffStorage
- previousIndicies sync.Map
- currentIndicies sync.Map
+
+ previousIndicies sync.Map
+ currentIndicies sync.Map
// configurations
beaconCfg *clparams.BeaconChainConfig
@@ -172,23 +146,16 @@ func NewForkGraphDisk(anchorState *state.CachingBeaconState, aferoFs afero.Fs, r
farthestExtendingPath[anchorRoot] = true
- balancesStorage := diffstorage.NewChainDiffStorage(base_encoding.ComputeCompressedSerializedUint64ListDiff, base_encoding.ApplyCompressedSerializedUint64ListDiff)
- validatorSetStorage := diffstorage.NewChainDiffStorage(base_encoding.ComputeCompressedSerializedValidatorSetListDiff, base_encoding.ApplyCompressedSerializedValidatorListDiff)
- inactivityScoresStorage := diffstorage.NewChainDiffStorage(base_encoding.ComputeCompressedSerializedUint64ListDiff, base_encoding.ApplyCompressedSerializedUint64ListDiff)
-
f := &forkGraphDisk{
fs: aferoFs,
// current state data
currentState: anchorState,
// configuration
- beaconCfg: anchorState.BeaconConfig(),
- genesisTime: anchorState.GenesisTime(),
- anchorSlot: anchorState.Slot(),
- balancesStorage: balancesStorage,
- validatorSetStorage: validatorSetStorage,
- inactivityScoresStorage: inactivityScoresStorage,
- rcfg: rcfg,
- emitter: emitter,
+ beaconCfg: anchorState.BeaconConfig(),
+ genesisTime: anchorState.GenesisTime(),
+ anchorSlot: anchorState.Slot(),
+ rcfg: rcfg,
+ emitter: emitter,
}
f.lowestAvailableBlock.Store(anchorState.Slot())
f.headers.Store(libcommon.Hash(anchorRoot), &anchorHeader)
@@ -280,13 +247,7 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock,
}
blockRewardsCollector := ð2.BlockRewardsCollector{}
- var prevDumpBalances, prevValidatorSetDump, prevInactivityScores []byte
- epochCross := newState.Slot()/f.beaconCfg.SlotsPerEpoch != block.Slot/f.beaconCfg.SlotsPerEpoch
- if (f.rcfg.Beacon || f.rcfg.Validator || f.rcfg.Lighthouse) && !epochCross {
- prevDumpBalances = libcommon.Copy(newState.RawBalances())
- prevValidatorSetDump = libcommon.Copy(newState.RawValidatorSet())
- prevInactivityScores = libcommon.Copy(newState.RawInactivityScores())
- }
+
// Execute the state
if invalidBlockErr := transition.TransitionState(newState, signedBlock, blockRewardsCollector, fullValidation); invalidBlockErr != nil {
// Add block to list of invalid blocks
@@ -302,11 +263,9 @@ func (f *forkGraphDisk) AddChainSegment(signedBlock *cltypes.SignedBeaconBlock,
if block.Version() != clparams.Phase0Version {
f.currentIndicies.Store(libcommon.Hash(blockRoot), libcommon.Copy(newState.RawCurrentEpochParticipation()))
f.previousIndicies.Store(libcommon.Hash(blockRoot), libcommon.Copy(newState.RawPreviousEpochParticipation()))
- f.inactivityScoresStorage.Insert(libcommon.Hash(blockRoot), block.ParentRoot, prevInactivityScores, newState.RawInactivityScores(), epochCross)
}
f.blockRewards.Store(libcommon.Hash(blockRoot), blockRewardsCollector)
- f.balancesStorage.Insert(libcommon.Hash(blockRoot), block.ParentRoot, prevDumpBalances, newState.RawBalances(), epochCross)
- f.validatorSetStorage.Insert(libcommon.Hash(blockRoot), block.ParentRoot, prevValidatorSetDump, newState.RawValidatorSet(), epochCross)
+
period := f.beaconCfg.SyncCommitteePeriod(newState.Slot())
f.syncCommittees.Store(period, syncCommittees{
currentSyncCommittee: newState.CurrentSyncCommittee().Copy(),
@@ -474,9 +433,7 @@ func (f *forkGraphDisk) Prune(pruneSlot uint64) (err error) {
f.blockRewards.Delete(root)
f.fs.Remove(getBeaconStateFilename(root))
f.fs.Remove(getBeaconStateCacheFilename(root))
- f.balancesStorage.Delete(root)
- f.validatorSetStorage.Delete(root)
- f.inactivityScoresStorage.Delete(root)
+
f.previousIndicies.Delete(root)
f.currentIndicies.Delete(root)
}
@@ -529,27 +486,25 @@ func (f *forkGraphDisk) GetLightClientUpdate(period uint64) (*cltypes.LightClien
}
func (f *forkGraphDisk) GetBalances(blockRoot libcommon.Hash) (solid.Uint64ListSSZ, error) {
- b, err := f.balancesStorage.Get(blockRoot)
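+ // diff storage was removed; balances are now read from the full beacon state for this block root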
+ st, err := f.GetState(blockRoot, true)
if err != nil {
return nil, err
}
- if len(b) == 0 {
- return nil, nil
+ if st == nil {
+ return nil, ErrStateNotFound
}
- out := solid.NewUint64ListSSZ(int(f.beaconCfg.ValidatorRegistryLimit))
- return out, out.DecodeSSZ(b, 0)
+ return st.Balances(), nil
}
func (f *forkGraphDisk) GetInactivitiesScores(blockRoot libcommon.Hash) (solid.Uint64ListSSZ, error) {
- b, err := f.inactivityScoresStorage.Get(blockRoot)
+ st, err := f.GetState(blockRoot, true)
if err != nil {
return nil, err
}
- if len(b) == 0 {
- return nil, nil
+ if st == nil {
+ return nil, ErrStateNotFound
}
- out := solid.NewUint64ListSSZ(int(f.beaconCfg.ValidatorRegistryLimit))
- return out, out.DecodeSSZ(b, 0)
+ return st.InactivityScores(), nil
}
func (f *forkGraphDisk) GetPreviousParticipationIndicies(blockRoot libcommon.Hash) (*solid.ParticipationBitList, error) {
@@ -577,13 +532,12 @@ func (f *forkGraphDisk) GetCurrentParticipationIndicies(blockRoot libcommon.Hash
}
func (f *forkGraphDisk) GetValidatorSet(blockRoot libcommon.Hash) (*solid.ValidatorSet, error) {
- b, err := f.validatorSetStorage.Get(blockRoot)
+ st, err := f.GetState(blockRoot, true)
if err != nil {
return nil, err
}
- if len(b) == 0 {
- return nil, nil
+ if st == nil {
+ return nil, ErrStateNotFound
}
- out := solid.NewValidatorSet(int(f.beaconCfg.ValidatorRegistryLimit))
- return out, out.DecodeSSZ(b, 0)
+ return st.ValidatorSet(), nil
}
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
index 902426d7801..11a8bc001d1 100644
--- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
@@ -24,7 +24,6 @@ import (
"os"
"github.com/golang/snappy"
- "github.com/klauspost/compress/zstd"
"github.com/spf13/afero"
libcommon "github.com/erigontech/erigon-lib/common"
@@ -94,12 +93,7 @@ func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash) (bs *s
}
defer cacheFile.Close()
- reader := decompressPool.Get().(*zstd.Decoder)
- defer decompressPool.Put(reader)
-
- reader.Reset(cacheFile)
-
- if err := bs.DecodeCaches(reader); err != nil {
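+ // zstd compression of the cache files was removed; caches are now read and written directly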
+ if err := bs.DecodeCaches(cacheFile); err != nil {
return nil, err
}
@@ -162,19 +156,13 @@ func (f *forkGraphDisk) DumpBeaconStateOnDisk(blockRoot libcommon.Hash, bs *stat
}
defer cacheFile.Close()
- writer := compressorPool.Get().(*zstd.Encoder)
- defer compressorPool.Put(writer)
-
- writer.Reset(cacheFile)
- defer writer.Close()
-
- if err := bs.EncodeCaches(writer); err != nil {
+ if err := bs.EncodeCaches(cacheFile); err != nil {
return err
}
- if err = writer.Close(); err != nil {
+
+ if err = cacheFile.Sync(); err != nil {
return
}
- err = cacheFile.Sync()
return
}
diff --git a/cmd/devnet/devnetutils/utils.go b/cmd/devnet/devnetutils/utils.go
index 4b9e3fe6c97..c88cbce96b2 100644
--- a/cmd/devnet/devnetutils/utils.go
+++ b/cmd/devnet/devnetutils/utils.go
@@ -117,14 +117,14 @@ func UniqueIDFromEnode(enode string) (string, error) {
return enode[:i], nil
}
-func RandomInt(max int) int {
- if max == 0 {
+func RandomInt(_max int) int {
+ if _max == 0 {
return 0
}
var n uint16
binary.Read(rand.Reader, binary.LittleEndian, &n)
- return int(n) % (max + 1)
+ return int(n) % (_max + 1)
}
// NamespaceAndSubMethodFromMethod splits a parent method into namespace and the actual method
@@ -142,10 +142,10 @@ func GenerateTopic(signature string) []libcommon.Hash {
}
// RandomNumberInRange returns a random number between min and max NOT inclusive
-func RandomNumberInRange(min, max uint64) (uint64, error) {
- if max <= min {
- return 0, fmt.Errorf("Invalid range: upper bound %d less or equal than lower bound %d", max, min)
+func RandomNumberInRange(_min, _max uint64) (uint64, error) {
+ if _max <= _min {
+ return 0, fmt.Errorf("Invalid range: upper bound %d less than or equal to lower bound %d", _max, _min)
}
- return uint64(RandomInt(int(max-min)) + int(min)), nil
+ return uint64(RandomInt(int(_max-_min)) + int(_min)), nil
}
diff --git a/cmd/state/exec3/trace_worker.go b/cmd/state/exec3/trace_worker.go
index 7c9ceeb8e79..7b80c49992b 100644
--- a/cmd/state/exec3/trace_worker.go
+++ b/cmd/state/exec3/trace_worker.go
@@ -74,11 +74,12 @@ func NewTraceWorker(tx kv.TemporalTx, cc *chain.Config, engine consensus.EngineR
stateReader: stateReader,
tracer: tracer,
evm: vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, cc, vm.Config{}),
- vmConfig: &vm.Config{},
+ vmConfig: &vm.Config{NoBaseFee: true},
ibs: state.New(stateReader),
}
if tracer != nil {
- ie.vmConfig = &vm.Config{Debug: true, Tracer: tracer}
+ ie.vmConfig.Debug = true
+ ie.vmConfig.Tracer = tracer
}
return ie
}
diff --git a/common/fdlimit/fdlimit_darwin.go b/common/fdlimit/fdlimit_darwin.go
index c59be293476..7d8b7f2fd5c 100644
--- a/common/fdlimit/fdlimit_darwin.go
+++ b/common/fdlimit/fdlimit_darwin.go
@@ -27,7 +27,7 @@ const hardlimit = 10240
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
// Returns the size it was set to (may differ from the desired 'max')
-func Raise(max uint64) (uint64, error) {
+func Raise(_max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
@@ -35,8 +35,8 @@ func Raise(max uint64) (uint64, error) {
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
- if limit.Cur > max {
- limit.Cur = max
+ if limit.Cur > _max {
+ limit.Cur = _max
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
diff --git a/common/fdlimit/fdlimit_unix.go b/common/fdlimit/fdlimit_unix.go
index 2f3ac908cc8..eebb72fde2e 100644
--- a/common/fdlimit/fdlimit_unix.go
+++ b/common/fdlimit/fdlimit_unix.go
@@ -26,7 +26,7 @@ import "syscall"
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
// Returns the size it was set to (may differ from the desired 'max')
-func Raise(max uint64) (uint64, error) {
+func Raise(_max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
@@ -34,8 +34,8 @@ func Raise(max uint64) (uint64, error) {
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
- if limit.Cur > max {
- limit.Cur = max
+ if limit.Cur > _max {
+ limit.Cur = _max
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
diff --git a/common/fdlimit/fdlimit_windows.go b/common/fdlimit/fdlimit_windows.go
index c7897072626..5a1137050bc 100644
--- a/common/fdlimit/fdlimit_windows.go
+++ b/common/fdlimit/fdlimit_windows.go
@@ -26,17 +26,17 @@ const hardlimit = 16384
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
-func Raise(max uint64) (uint64, error) {
+func Raise(_max uint64) (uint64, error) {
// This method is NOP by design:
// * Linux/Darwin counterparts need to manually increase per process limits
// * On Windows Go uses the CreateFile API, which is limited to 16K files, non
// changeable from within a running process
// This way we can always "request" raising the limits, which will either have
// or not have effect based on the platform we're running on.
- if max > hardlimit {
+ if _max > hardlimit {
return hardlimit, fmt.Errorf("file descriptor limit (%d) reached", hardlimit)
}
- return max, nil
+ return _max, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this
diff --git a/consensus/ethash/consensus_test.go b/consensus/ethash/consensus_test.go
index 7ac1e5d5dd0..4436a0bfa46 100644
--- a/consensus/ethash/consensus_test.go
+++ b/consensus/ethash/consensus_test.go
@@ -95,11 +95,11 @@ func TestCalcDifficulty(t *testing.T) {
}
}
-func randSlice(min, max uint32) []byte {
+func randSlice(_min, _max uint32) []byte {
var b = make([]byte, 4)
rand.Read(b)
a := binary.LittleEndian.Uint32(b)
- size := min + a%(max-min)
+ size := _min + a%(_max-_min)
out := make([]byte, size)
rand.Read(out)
return out
diff --git a/core/types/encdec_test.go b/core/types/encdec_test.go
index 69a19c5a0c1..296f5467fec 100644
--- a/core/types/encdec_test.go
+++ b/core/types/encdec_test.go
@@ -44,8 +44,8 @@ func NewTRand() *TRand {
return &TRand{rnd: rand.New(src)}
}
-func (tr *TRand) RandIntInRange(min, max int) int {
- return (tr.rnd.Intn(max-min) + min)
+func (tr *TRand) RandIntInRange(_min, _max int) int {
+ return (tr.rnd.Intn(_max-_min) + _min)
}
func (tr *TRand) RandUint64() *uint64 {
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index b5aeb0f98b6..fde05e8680e 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -549,8 +549,8 @@ const N = 50
var dummyBlobTxs = [N]*BlobTx{}
var dummyBlobWrapperTxs = [N]*BlobTxWrapper{}
-func randIntInRange(min, max int) int {
- return (rand.Intn(max-min) + min)
+func randIntInRange(_min, _max int) int {
+ return (rand.Intn(_max-_min) + _min)
}
func randAddr() *libcommon.Address {
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 1ec0c0ff645..fb46915fed3 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -134,6 +134,11 @@ func (evm *EVM) Reset(txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState) {
}
func (evm *EVM) ResetBetweenBlocks(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState, vmConfig Config, chainRules *chain.Rules) {
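+ // When NoBaseFee is set (used for eth_call/tracing) and the caller supplied a zero gas price,
+ // force the block base fee to zero so EIP-1559 fee checks don't reject the call.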
+ if vmConfig.NoBaseFee {
+ if txCtx.GasPrice.IsZero() {
+ blockCtx.BaseFee = new(uint256.Int)
+ }
+ }
evm.Context = blockCtx
evm.TxContext = txCtx
evm.intraBlockState = ibs
diff --git a/docs/programmers_guide/guide.md b/docs/programmers_guide/guide.md
index a21749fdc80..fb1f99f9654 100644
--- a/docs/programmers_guide/guide.md
+++ b/docs/programmers_guide/guide.md
@@ -65,16 +65,16 @@ Accounts are identified by their addresses. Address is a 20-byte binary string,
contract and non-contract accounts.
For non-contract accounts, the address is derived from the public key, by hashing it and taking lowest 20 bytes of the
-32-byte hash value, as shown in the function `PubkeyToAddress` in the file [crypto/crypto.go](../../crypto/crypto.go)
+32-byte hash value, as shown in the function `PubkeyToAddress` in the file [crypto/crypto.go](../../erigon-lib/crypto/crypto.go)
For smart contract accounts created by a transaction without destination, or by `CREATE` opcode, the address is derived
from the address and the nonce of the creator, as shown in the function `CreateAddress` in the
-file [crypto/crypto.go](../../crypto/crypto.go)
+file [crypto/crypto.go](../../erigon-lib/crypto/crypto.go)
For smart contract accounts created by `CREATE2` opcode, the address is derived from the creator's address, salt (
256-bit argument supplied to the `CREATE2` invocation), and the code hash of the initialisation code (code that is
executed to output the actual, deployed code of the new contract), as shown in the function `CreateAddress2` in the
-file [crypto/crypto.go](../../crypto/crypto.go)
+file [crypto/crypto.go](../../erigon-lib/crypto/crypto.go)
In many places in the code, sets of accounts are represented by mappings from account addresses to the objects
representing the accounts themselves, for example, field `stateObjects` in the
diff --git a/erigon-lib/.golangci.yml b/erigon-lib/.golangci.yml
index d66cd898232..7f4a485356d 100644
--- a/erigon-lib/.golangci.yml
+++ b/erigon-lib/.golangci.yml
@@ -22,6 +22,7 @@ linters:
- testifylint #TODO: enable me
- perfsprint #TODO: enable me
- protogetter
+ - typecheck
enable:
- unconvert
- predeclared
@@ -111,6 +112,7 @@ issues:
- unused
- gocritic
- perfsprint
+ - typecheck
- path: hack\.go
linters:
- gosec
diff --git a/erigon-lib/chain/snapcfg/util.go b/erigon-lib/chain/snapcfg/util.go
index f60de31aaff..6a13bff1e6a 100644
--- a/erigon-lib/chain/snapcfg/util.go
+++ b/erigon-lib/chain/snapcfg/util.go
@@ -245,7 +245,7 @@ func (p Preverified) Versioned(preferredVersion snaptype.Version, minVersion sna
}
func (p Preverified) MaxBlock(version snaptype.Version) (uint64, error) {
- max := uint64(0)
+ _max := uint64(0)
for _, p := range p {
_, fileName := filepath.Split(p.Name)
ext := filepath.Ext(fileName)
@@ -261,16 +261,16 @@ func (p Preverified) MaxBlock(version snaptype.Version) (uint64, error) {
return 0, err
}
- if max < to {
- max = to
+ if _max < to {
+ _max = to
}
}
- if max == 0 { // to prevent underflow
+ if _max == 0 { // to prevent underflow
return 0, nil
}
- return max*1_000 - 1, nil
+ return _max*1_000 - 1, nil
}
var errWrongVersion = errors.New("wrong version")
@@ -464,17 +464,17 @@ func MergeLimitFromCfg(cfg *Cfg, snapType snaptype.Enum, fromBlock uint64) uint6
}
func MaxSeedableSegment(chain string, dir string) uint64 {
- var max uint64
+ var _max uint64
if list, err := snaptype.Segments(dir); err == nil {
for _, info := range list {
- if Seedable(chain, info) && info.Type.Enum() == snaptype.MinCoreEnum && info.To > max {
- max = info.To
+ if Seedable(chain, info) && info.Type.Enum() == snaptype.MinCoreEnum && info.To > _max {
+ _max = info.To
}
}
}
- return max
+ return _max
}
var oldMergeSteps = append([]uint64{snaptype.Erigon2OldMergeLimit}, snaptype.MergeSteps...)
@@ -498,14 +498,14 @@ func KnownCfg(networkName string) *Cfg {
return newCfg(networkName, c.Typed(knownTypes[networkName]))
}
-func VersionedCfg(networkName string, preferred snaptype.Version, min snaptype.Version) *Cfg {
+func VersionedCfg(networkName string, preferred snaptype.Version, _min snaptype.Version) *Cfg {
c, ok := knownPreverified[networkName]
if !ok {
return newCfg(networkName, Preverified{})
}
- return newCfg(networkName, c.Versioned(preferred, min))
+ return newCfg(networkName, c.Versioned(preferred, _min))
}
var KnownWebseeds = map[string][]string{
diff --git a/erigon-lib/common/cmp/cmp.go b/erigon-lib/common/cmp/cmp.go
index 8ee45182c17..db832450987 100644
--- a/erigon-lib/common/cmp/cmp.go
+++ b/erigon-lib/common/cmp/cmp.go
@@ -21,12 +21,12 @@ import (
)
// InRange - ensure val is in [min,max] range
-func InRange[T cmp.Ordered](min, max, val T) T {
- if min >= val {
- return min
+func InRange[T cmp.Ordered](_min, _max, val T) T {
+ if _min >= val {
+ return _min
}
- if max <= val {
- return max
+ if _max <= val {
+ return _max
}
return val
}
diff --git a/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go b/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go
index 072e32b0888..23df6f186f4 100644
--- a/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go
+++ b/erigon-lib/crypto/bn256/cloudflare/gfp_decl.go
@@ -11,7 +11,7 @@ import (
var hasBMI2 = cpu.X86.HasBMI2
-// go:noescape
+//go:noescape
func gfpNeg(c, a *gfP)
//go:noescape
diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go
index cf89315feb3..6fb5eb1cbfc 100644
--- a/erigon-lib/downloader/downloader.go
+++ b/erigon-lib/downloader/downloader.go
@@ -174,7 +174,7 @@ func insertCloudflareHeaders(req *http.Request) {
// It also tries to parse Retry-After response header when a http.StatusTooManyRequests
// (HTTP Code 429) is found in the resp parameter. Hence it will return the number of
// seconds the server states it may be ready to process more requests from this client.
-func calcBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+func calcBackoff(_min, _max time.Duration, attemptNum int, resp *http.Response) time.Duration {
if resp != nil {
if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable {
if s, ok := resp.Header["Retry-After"]; ok {
@@ -185,10 +185,10 @@ func calcBackoff(min, max time.Duration, attemptNum int, resp *http.Response) ti
}
}
- mult := math.Pow(2, float64(attemptNum)) * float64(min)
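+ // exponential backoff: _min * 2^attemptNum, capped at _max (the float comparison below also catches overflow)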
+ mult := math.Pow(2, float64(attemptNum)) * float64(_min)
sleep := time.Duration(mult)
- if float64(sleep) != mult || sleep > max {
- sleep = max
+ if float64(sleep) != mult || sleep > _max {
+ sleep = _max
}
return sleep
diff --git a/erigon-lib/rlp2/util.go b/erigon-lib/rlp2/util.go
index 7cb1b78ed10..c3d1de93d81 100644
--- a/erigon-lib/rlp2/util.go
+++ b/erigon-lib/rlp2/util.go
@@ -76,7 +76,7 @@ func identifyToken(b byte) Token {
return TokenLongBlob
case b >= 192 && b <= 247:
return TokenShortList
- case b >= 248 && b <= 255:
+ case b >= 248:
return TokenLongList
}
return TokenUnknown
diff --git a/erigon-lib/seg/decompress.go b/erigon-lib/seg/decompress.go
index 44b890b064e..ca257a11bae 100644
--- a/erigon-lib/seg/decompress.go
+++ b/erigon-lib/seg/decompress.go
@@ -23,6 +23,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "sort"
"strconv"
"sync/atomic"
"time"
@@ -1064,3 +1065,28 @@ func (g *Getter) FastNext(buf []byte) ([]byte, uint64) {
g.dataBit = 0
return buf[:wordLen], postLoopPos
}
+
+// BinarySearch - expects a sorted file - Seeks `g` to the first key >= `seek` using binary search - suboptimal, since it touches many places in the file
+// use `.Next` to read the found key
+// at `ok = false` it leaves `g` in an unpredictable state
+func (g *Getter) BinarySearch(seek []byte, count int, getOffset func(i uint64) (offset uint64)) (foundOffset uint64, ok bool) {
+ var key []byte
+ foundItem := sort.Search(count, func(i int) bool {
+ offset := getOffset(uint64(i))
+ g.Reset(offset)
+ if g.HasNext() {
+ key, _ = g.Next(key[:0])
+ return bytes.Compare(key, seek) >= 0
+ }
+ return false
+ })
+ if foundItem == count { // `Search` returns `n` if not found
+ return 0, false
+ }
+ foundOffset = getOffset(uint64(foundItem))
+ g.Reset(foundOffset)
+ if !g.HasNext() {
+ return 0, false
+ }
+ return foundOffset, true
+}
diff --git a/erigon-lib/seg/decompress_test.go b/erigon-lib/seg/decompress_test.go
index f43f0e08048..1568b06f4d9 100644
--- a/erigon-lib/seg/decompress_test.go
+++ b/erigon-lib/seg/decompress_test.go
@@ -24,6 +24,7 @@ import (
"math/rand"
"os"
"path/filepath"
+ "slices"
"strings"
"testing"
"time"
@@ -257,22 +258,23 @@ func prepareLoremDictUncompressed(t *testing.T) *Decompressor {
cfg.MinPatternScore = 1
cfg.Workers = 2
c, err := NewCompressor(context.Background(), t.Name(), file, tmpDir, cfg, log.LvlDebug, logger)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
defer c.Close()
+ slices.Sort(loremStrings)
for k, w := range loremStrings {
- if err = c.AddUncompressedWord([]byte(fmt.Sprintf("%s %d", w, k))); err != nil {
- t.Fatal(err)
+ if len(w) == 0 {
+ err = c.AddUncompressedWord([]byte(w))
+ require.NoError(t, err)
+ continue
}
+ err = c.AddUncompressedWord([]byte(fmt.Sprintf("%s %d", w, k)))
+ require.NoError(t, err)
}
- if err = c.Compress(); err != nil {
- t.Fatal(err)
- }
- var d *Decompressor
- if d, err = NewDecompressor(file); err != nil {
- t.Fatal(err)
- }
+ err = c.Compress()
+ require.NoError(t, err)
+ d, err := NewDecompressor(file)
+ require.NoError(t, err)
+ t.Cleanup(d.Close)
return d
}
@@ -281,16 +283,60 @@ func TestUncompressed(t *testing.T) {
defer d.Close()
g := d.MakeGetter()
i := 0
+ var offsets []uint64
+ offsets = append(offsets, 0)
for g.HasNext() {
w := loremStrings[i]
expected := []byte(fmt.Sprintf("%s %d", w, i+1))
expected = expected[:len(expected)/2]
- actual, _ := g.NextUncompressed()
+ actual, offset := g.NextUncompressed()
if bytes.Equal(expected, actual) {
t.Errorf("expected %s, actual %s", expected, actual)
}
i++
- }
+ offsets = append(offsets, offset)
+ }
+
+ t.Run("BinarySearch middle", func(t *testing.T) {
+ require := require.New(t)
+ _, ok := g.BinarySearch([]byte("ipsum"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] })
+ require.True(ok)
+ k, _ := g.Next(nil)
+ require.Equal("ipsum 38", string(k))
+ _, ok = g.BinarySearch([]byte("ipsu"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] })
+ require.True(ok)
+ k, _ = g.Next(nil)
+ require.Equal("ipsum 38", string(k))
+ })
+ t.Run("BinarySearch end of file", func(t *testing.T) {
+ require := require.New(t)
+ //last word is `voluptate`
+ _, ok := g.BinarySearch([]byte("voluptate"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] })
+ require.True(ok)
+ k, _ := g.Next(nil)
+ require.Equal("voluptate 69", string(k))
+ _, ok = g.BinarySearch([]byte("voluptat"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] })
+ require.True(ok)
+ k, _ = g.Next(nil)
+ require.Equal("voluptate 69", string(k))
+ _, ok = g.BinarySearch([]byte("voluptatez"), d.Count(), func(i uint64) (offset uint64) { return offsets[i] })
+ require.False(ok)
+ })
+
+ t.Run("BinarySearch begin of file", func(t *testing.T) {
+ require := require.New(t)
+ //first word is ``
+ _, ok := g.BinarySearch([]byte(""), d.Count(), func(i uint64) (offset uint64) { return offsets[i] })
+ require.True(ok)
+ k, _ := g.Next(nil)
+ require.Equal("", string(k))
+
+ _, ok = g.BinarySearch(nil, d.Count(), func(i uint64) (offset uint64) { return offsets[i] })
+ require.True(ok)
+ k, _ = g.Next(nil)
+ require.Equal("", string(k))
+ })
+
}
func TestDecompressor_OpenCorrupted(t *testing.T) {
@@ -461,12 +507,15 @@ func TestDecompressor_OpenCorrupted(t *testing.T) {
})
}
-const lorem = `Lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et
-dolore magna aliqua Ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
-consequat Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur
-Excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est laborum`
+const lorem = `lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et
+dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
+consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur
+excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit anim id est laborum`
-var loremStrings = strings.Split(lorem, " ")
+var loremStrings = append(strings.Split(rmNewLine(lorem), " "), "") // including empty string - to trigger corner cases
+func rmNewLine(s string) string {
+ return strings.ReplaceAll(strings.ReplaceAll(s, "\n", " "), "\r", "")
+}
func TestDecompressTorrent(t *testing.T) {
t.Skip()
@@ -517,10 +566,6 @@ func generateRandWords() {
WORDS[N-1] = []byte{}
}
-func randIntInRange(min, max int) int {
- return (rand.Intn(max-min) + min)
-}
-
func clearPrevDict() {
WORDS = [N][]byte{}
WORD_FLAGS = [N]bool{}
diff --git a/erigon-lib/state/aggregator_bench_test.go b/erigon-lib/state/aggregator_bench_test.go
index 894563f9eef..089884db80a 100644
--- a/erigon-lib/state/aggregator_bench_test.go
+++ b/erigon-lib/state/aggregator_bench_test.go
@@ -20,7 +20,6 @@ import (
"bytes"
"context"
"fmt"
- "math/rand"
"os"
"path"
"path/filepath"
@@ -109,7 +108,7 @@ func BenchmarkAggregator_Processing(b *testing.B) {
}
func queueKeys(ctx context.Context, seed, ofSize uint64) <-chan []byte {
- rnd := rand.New(rand.NewSource(int64(seed)))
+ rnd := newRnd(seed)
keys := make(chan []byte, 1)
go func() {
for {
@@ -127,10 +126,10 @@ func queueKeys(ctx context.Context, seed, ofSize uint64) <-chan []byte {
}
func Benchmark_BtreeIndex_Allocation(b *testing.B) {
- rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+ rnd := newRnd(uint64(time.Now().UnixNano()))
for i := 0; i < b.N; i++ {
now := time.Now()
- count := rnd.Intn(1000000000)
+ count := rnd.IntN(1000000000)
bt := newBtAlloc(uint64(count), uint64(1<<12), true, nil, nil)
bt.traverseDfs()
fmt.Printf("alloc %v\n", time.Since(now))
@@ -139,7 +138,7 @@ func Benchmark_BtreeIndex_Allocation(b *testing.B) {
func Benchmark_BtreeIndex_Search(b *testing.B) {
logger := log.New()
- rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+ rnd := newRnd(uint64(time.Now().UnixNano()))
tmp := b.TempDir()
defer os.RemoveAll(tmp)
dataPath := "../../data/storage.256-288.kv"
@@ -159,7 +158,7 @@ func Benchmark_BtreeIndex_Search(b *testing.B) {
getter := seg.NewReader(kv.MakeGetter(), comp)
for i := 0; i < b.N; i++ {
- p := rnd.Intn(len(keys))
+ p := rnd.IntN(len(keys))
cur, err := bt.Seek(getter, keys[p])
require.NoErrorf(b, err, "i=%d", i)
require.EqualValues(b, keys[p], cur.Key())
@@ -193,12 +192,12 @@ func Benchmark_BTree_Seek(b *testing.B) {
M := uint64(1024)
compress := seg.CompressNone
kv, bt, keys, _ := benchInitBtreeIndex(b, M, compress)
- rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+ rnd := newRnd(uint64(time.Now().UnixNano()))
getter := seg.NewReader(kv.MakeGetter(), compress)
b.Run("seek_only", func(b *testing.B) {
for i := 0; i < b.N; i++ {
- p := rnd.Intn(len(keys))
+ p := rnd.IntN(len(keys))
cur, err := bt.Seek(getter, keys[p])
require.NoError(b, err)
@@ -209,7 +208,7 @@ func Benchmark_BTree_Seek(b *testing.B) {
b.Run("seek_then_next", func(b *testing.B) {
for i := 0; i < b.N; i++ {
- p := rnd.Intn(len(keys))
+ p := rnd.IntN(len(keys))
cur, err := bt.Seek(getter, keys[p])
require.NoError(b, err)
@@ -249,7 +248,7 @@ func Benchmark_Recsplit_Find_ExternalFile(b *testing.B) {
b.Skip("requires existing KV index file at ../../data/storage.kv")
}
- rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+ rnd := newRnd(uint64(time.Now().UnixNano()))
tmp := b.TempDir()
defer os.RemoveAll(tmp)
@@ -269,7 +268,7 @@ func Benchmark_Recsplit_Find_ExternalFile(b *testing.B) {
require.NoError(b, err)
for i := 0; i < b.N; i++ {
- p := rnd.Intn(len(keys))
+ p := rnd.IntN(len(keys))
offset, _ := idxr.Lookup(keys[p])
getter.Reset(offset)
diff --git a/erigon-lib/state/aggregator_fuzz_test.go b/erigon-lib/state/aggregator_fuzz_test.go
index 268479c09f9..103bc56a358 100644
--- a/erigon-lib/state/aggregator_fuzz_test.go
+++ b/erigon-lib/state/aggregator_fuzz_test.go
@@ -19,7 +19,20 @@
package state
import (
+ "context"
+ "encoding/binary"
"testing"
+ "time"
+
+ "github.com/c2h5oh/datasize"
+ "github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/common/datadir"
+ "github.com/erigontech/erigon-lib/common/length"
+ "github.com/erigontech/erigon-lib/kv"
+ "github.com/erigontech/erigon-lib/kv/mdbx"
+ "github.com/erigontech/erigon-lib/log/v3"
+ "github.com/erigontech/erigon-lib/types"
+ "github.com/holiman/uint256"
"github.com/stretchr/testify/require"
)
@@ -38,3 +51,238 @@ func Fuzz_BtreeIndex_Allocation(f *testing.F) {
})
}
+
+func Fuzz_AggregatorV3_Merge(f *testing.F) {
+ db, agg := testFuzzDbAndAggregatorv3(f, 10)
+ rwTx, err := db.BeginRwNosync(context.Background())
+ require.NoError(f, err)
+ defer func() {
+ if rwTx != nil {
+ rwTx.Rollback()
+ }
+ }()
+
+ ac := agg.BeginFilesRo()
+ defer ac.Close()
+ domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New())
+ require.NoError(f, err)
+ defer domains.Close()
+
+ const txs = uint64(1000)
+
+ var (
+ commKey1 = []byte("someCommKey")
+ commKey2 = []byte("otherCommKey")
+ )
+
+ // account addresses and storage locations are derived from the fuzz input;
+ // commitment keys are written on every txNum (commKey2 at multiples of 135, commKey1 otherwise)
+ var maxWrite, otherMaxWrite uint64
+ //f.Add([]common.Address{common.HexToAddress("0x123"), common.HexToAddress("0x456")})
+ //f.Add([]common.Hash{common.HexToHash("0x123"), common.HexToHash("0x456")})
+ f.Fuzz(func(t *testing.T, data []byte) {
+ if len(data) < int(txs*(length.Addr+length.Hash)) {
+ t.Skip()
+ }
+ addrData := data[:txs*length.Addr]
+ locData := data[txs*length.Addr : txs*(length.Addr+length.Hash)]
+ addrs := make([]common.Address, 1000)
+ for i := 0; i < 1000; i++ {
+ copy(addrs[i][:], addrData[i*length.Addr:(i+1)*length.Addr])
+ }
+ locs := make([]common.Address, 1000)
+ for i := 0; i < 1000; i++ {
+ copy(locs[i][:], locData[i*length.Hash:(i+1)*length.Hash])
+ }
+ for txNum := uint64(1); txNum <= txs; txNum++ {
+ domains.SetTxNum(txNum)
+
+ buf := types.EncodeAccountBytesV3(1, uint256.NewInt(0), nil, 0)
+ err = domains.DomainPut(kv.AccountsDomain, addrs[txNum].Bytes(), nil, buf, nil, 0)
+ require.NoError(t, err)
+
+			err = domains.DomainPut(kv.StorageDomain, addrs[txNum-1].Bytes(), locs[txNum-1].Bytes(), []byte{addrs[txNum-1].Bytes()[0], locs[txNum-1].Bytes()[0]}, nil, 0)
+ require.NoError(t, err)
+
+ var v [8]byte
+ binary.BigEndian.PutUint64(v[:], txNum)
+ if txNum%135 == 0 {
+ pv, step, _, err := ac.GetLatest(kv.CommitmentDomain, commKey2, nil, rwTx)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.CommitmentDomain, commKey2, nil, v[:], pv, step)
+ require.NoError(t, err)
+ otherMaxWrite = txNum
+ } else {
+ pv, step, _, err := ac.GetLatest(kv.CommitmentDomain, commKey1, nil, rwTx)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.CommitmentDomain, commKey1, nil, v[:], pv, step)
+ require.NoError(t, err)
+ maxWrite = txNum
+ }
+
+ }
+
+ err = domains.Flush(context.Background(), rwTx)
+ require.NoError(t, err)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+ rwTx = nil
+
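+		// build static files from everything flushed so far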
+ err = agg.BuildFiles(txs)
+ require.NoError(t, err)
+
+ rwTx, err = db.BeginRw(context.Background())
+ require.NoError(t, err)
+ defer rwTx.Rollback()
+
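+		// prune the DB copy of the data that has been moved into files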
+ logEvery := time.NewTicker(30 * time.Second)
+ defer logEvery.Stop()
+ stat, err := ac.Prune(context.Background(), rwTx, 0, logEvery)
+ require.NoError(t, err)
+ t.Logf("Prune: %s", stat)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+
+ err = agg.MergeLoop(context.Background())
+ require.NoError(t, err)
+
+ // Check the history
+ roTx, err := db.BeginRo(context.Background())
+ require.NoError(t, err)
+ defer roTx.Rollback()
+
+ dc := agg.BeginFilesRo()
+
+ v, _, ex, err := dc.GetLatest(kv.CommitmentDomain, commKey1, nil, roTx)
+ require.NoError(t, err)
+ require.Truef(t, ex, "key %x not found", commKey1)
+
+ require.EqualValues(t, maxWrite, binary.BigEndian.Uint64(v[:]))
+
+ v, _, ex, err = dc.GetLatest(kv.CommitmentDomain, commKey2, nil, roTx)
+ require.NoError(t, err)
+ require.Truef(t, ex, "key %x not found", commKey2)
+ dc.Close()
+
+ require.EqualValues(t, otherMaxWrite, binary.BigEndian.Uint64(v[:]))
+ })
+
+}
+
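+// Fuzz_AggregatorV3_MergeValTransform runs the same flow with
+// commitmentValuesTransform enabled, exercising the value transformation
+// applied to commitment data when commitment files are merged.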
+func Fuzz_AggregatorV3_MergeValTransform(f *testing.F) {
+ db, agg := testFuzzDbAndAggregatorv3(f, 10)
+ rwTx, err := db.BeginRwNosync(context.Background())
+ require.NoError(f, err)
+ defer func() {
+ if rwTx != nil {
+ rwTx.Rollback()
+ }
+ }()
+ ac := agg.BeginFilesRo()
+ defer ac.Close()
+ domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New())
+ require.NoError(f, err)
+ defer domains.Close()
+
+ const txs = uint64(1000)
+
+ agg.commitmentValuesTransform = true
+
+ state := make(map[string][]byte)
+
+	// addresses and storage locations are sliced out of the fuzz input;
+	// state mirrors every account/storage write made during the run
+ f.Fuzz(func(t *testing.T, data []byte) {
+ if len(data) < int(txs*(length.Addr+length.Hash)) {
+ t.Skip()
+ }
+ addrData := data[:txs*length.Addr]
+ locData := data[txs*length.Addr : txs*(length.Addr+length.Hash)]
+		addrs := make([]common.Address, txs)
+		for i := 0; i < int(txs); i++ {
+			copy(addrs[i][:], addrData[i*length.Addr:(i+1)*length.Addr])
+		}
+		locs := make([]common.Hash, txs) // storage locations are hash-sized; common.Address would truncate them to 20 bytes
+		for i := 0; i < int(txs); i++ {
+			copy(locs[i][:], locData[i*length.Hash:(i+1)*length.Hash])
+		}
+ for txNum := uint64(1); txNum <= txs; txNum++ {
+ domains.SetTxNum(txNum)
+
+ buf := types.EncodeAccountBytesV3(1, uint256.NewInt(txNum*1e6), nil, 0)
+			err = domains.DomainPut(kv.AccountsDomain, addrs[txNum-1].Bytes(), nil, buf, nil, 0) // txNum starts at 1, slices are 0-based
+ require.NoError(t, err)
+
+			err = domains.DomainPut(kv.StorageDomain, addrs[txNum-1].Bytes(), locs[txNum-1].Bytes(), []byte{addrs[txNum-1].Bytes()[0], locs[txNum-1].Bytes()[0]}, nil, 0)
+ require.NoError(t, err)
+
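+			// recompute the commitment once per step so the commitment domain has data to transform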
+ if (txNum+1)%agg.StepSize() == 0 {
+ _, err := domains.ComputeCommitment(context.Background(), true, txNum/10, "")
+ require.NoError(t, err)
+ }
+
+			state[string(addrs[txNum-1].Bytes())] = buf
+			state[string(addrs[txNum-1].Bytes())+string(locs[txNum-1].Bytes())] = []byte{addrs[txNum-1].Bytes()[0], locs[txNum-1].Bytes()[0]}
+ }
+
+ err = domains.Flush(context.Background(), rwTx)
+ require.NoError(t, err)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+ rwTx = nil
+
+ err = agg.BuildFiles(txs)
+ require.NoError(t, err)
+
+ ac.Close()
+ ac = agg.BeginFilesRo()
+ defer ac.Close()
+
+ rwTx, err = db.BeginRwNosync(context.Background())
+ require.NoError(t, err)
+ defer func() {
+ if rwTx != nil {
+ rwTx.Rollback()
+ }
+ }()
+
+ logEvery := time.NewTicker(30 * time.Second)
+ defer logEvery.Stop()
+ stat, err := ac.Prune(context.Background(), rwTx, 0, logEvery)
+ require.NoError(t, err)
+ t.Logf("Prune: %s", stat)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+
+ err = agg.MergeLoop(context.Background())
+ require.NoError(t, err)
+ })
+}
+
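+// testFuzzDbAndAggregatorv3 creates an in-memory MDBX database and an Aggregator
+// with the given step size; both are closed via f.Cleanup when the fuzzer exits.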
+func testFuzzDbAndAggregatorv3(f *testing.F, aggStep uint64) (kv.RwDB, *Aggregator) {
+ f.Helper()
+ require := require.New(f)
+ dirs := datadir.New(f.TempDir())
+ logger := log.New()
+ db := mdbx.NewMDBX(logger).InMem(dirs.Chaindata).GrowthStep(32 * datasize.MB).MapSize(2 * datasize.GB).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
+ return kv.ChaindataTablesCfg
+ }).MustOpen()
+ f.Cleanup(db.Close)
+
+ agg, err := NewAggregator(context.Background(), dirs, aggStep, db, logger)
+ require.NoError(err)
+ f.Cleanup(agg.Close)
+ err = agg.OpenFolder()
+ require.NoError(err)
+ agg.DisableFsync()
+ return db, agg
+}
diff --git a/erigon-lib/state/aggregator_test.go b/erigon-lib/state/aggregator_test.go
index 9fa2c1ac84e..d53de8a77e0 100644
--- a/erigon-lib/state/aggregator_test.go
+++ b/erigon-lib/state/aggregator_test.go
@@ -319,7 +319,7 @@ func aggregatorV3_RestartOnDatadir(t *testing.T, rc runCfg) {
defer domains.Close()
var latestCommitTxNum uint64
- rnd := rand.New(rand.NewSource(time.Now().Unix()))
+ rnd := newRnd(0)
someKey := []byte("somekey")
txs := (aggStep / 2) * 19
@@ -459,7 +459,7 @@ func TestAggregatorV3_PruneSmallBatches(t *testing.T) {
maxTx := aggStep * 5
t.Logf("step=%d tx_count=%d\n", aggStep, maxTx)
- rnd := rand.New(rand.NewSource(0))
+ rnd := newRnd(0)
generateSharedDomainsUpdates(t, domains, maxTx, rnd, 20, 10, aggStep/2)
@@ -639,7 +639,7 @@ func fillRawdbTxNumsIndexForSharedDomains(t *testing.T, rwTx kv.RwTx, maxTx, com
}
}
-func generateSharedDomainsUpdates(t *testing.T, domains *SharedDomains, maxTxNum uint64, rnd *rand.Rand, keyMaxLen, keysCount, commitEvery uint64) map[string]struct{} {
+func generateSharedDomainsUpdates(t *testing.T, domains *SharedDomains, maxTxNum uint64, rnd *rndGen, keyMaxLen, keysCount, commitEvery uint64) map[string]struct{} {
t.Helper()
usedKeys := make(map[string]struct{}, keysCount*maxTxNum)
for txNum := uint64(1); txNum <= maxTxNum; txNum++ {
@@ -655,14 +655,14 @@ func generateSharedDomainsUpdates(t *testing.T, domains *SharedDomains, maxTxNum
return usedKeys
}
-func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txNum uint64, rnd *rand.Rand, prevKeys map[string]struct{}, keyMaxLen, keysCount uint64) map[string]struct{} {
+func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txNum uint64, rnd *rndGen, prevKeys map[string]struct{}, keyMaxLen, keysCount uint64) map[string]struct{} {
t.Helper()
domains.SetTxNum(txNum)
getKey := func() ([]byte, bool) {
- r := rnd.Intn(100)
+ r := rnd.IntN(100)
if r < 50 && len(prevKeys) > 0 {
- ri := rnd.Intn(len(prevKeys))
+ ri := rnd.IntN(len(prevKeys))
for k := range prevKeys {
if ri == 0 {
return []byte(k), true
@@ -681,7 +681,7 @@ func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txN
for j := uint64(0); j < keysCount; j++ {
key, existed := getKey()
- r := rnd.Intn(101)
+ r := rnd.IntN(101)
switch {
case r <= 33:
buf := types.EncodeAccountBytesV3(txNum, uint256.NewInt(txNum*100_000), nil, 0)
@@ -694,7 +694,7 @@ func generateSharedDomainsUpdatesForTx(t *testing.T, domains *SharedDomains, txN
require.NoError(t, err)
case r > 33 && r <= 66:
- codeUpd := make([]byte, rnd.Intn(24576))
+ codeUpd := make([]byte, rnd.IntN(24576))
_, err := rnd.Read(codeUpd)
require.NoError(t, err)
for limit := 1000; len(key) > length.Addr && limit > 0; limit-- {
@@ -779,7 +779,7 @@ func TestAggregatorV3_RestartOnFiles(t *testing.T) {
txs := aggStep * 5
t.Logf("step=%d tx_count=%d\n", aggStep, txs)
- rnd := rand.New(rand.NewSource(0))
+ rnd := newRnd(0)
keys := make([][]byte, txs)
for txNum := uint64(1); txNum <= txs; txNum++ {
@@ -918,7 +918,7 @@ func TestAggregatorV3_ReplaceCommittedKeys(t *testing.T) {
txs := (aggStep) * StepsInColdFile
t.Logf("step=%d tx_count=%d", aggStep, txs)
- rnd := rand.New(rand.NewSource(0))
+ rnd := newRnd(0)
keys := make([][]byte, txs/2)
var prev1, prev2 []byte
@@ -1032,7 +1032,7 @@ func pivotKeysFromKV(dataPath string) ([][]byte, error) {
func generateKV(tb testing.TB, tmp string, keySize, valueSize, keyCount int, logger log.Logger, compressFlags seg.FileCompression) string {
tb.Helper()
- rnd := rand.New(rand.NewSource(0))
+ rnd := newRnd(0)
values := make([]byte, valueSize)
dataPath := path.Join(tmp, fmt.Sprintf("%dk.kv", keyCount/1000))
@@ -1052,7 +1052,7 @@ func generateKV(tb testing.TB, tmp string, keySize, valueSize, keyCount int, log
binary.BigEndian.PutUint64(key[keySize-8:], uint64(i))
require.NoError(tb, err)
- n, err = rnd.Read(values[:rnd.Intn(valueSize)+1])
+ n, err = rnd.Read(values[:rnd.IntN(valueSize)+1])
require.NoError(tb, err)
err = collector.Collect(key, values[:n])
@@ -1114,7 +1114,7 @@ func testDbAndAggregatorv3(t *testing.T, aggStep uint64) (kv.RwDB, *Aggregator)
func generateInputData(tb testing.TB, keySize, valueSize, keyCount int) ([][]byte, [][]byte) {
tb.Helper()
- rnd := rand.New(rand.NewSource(0))
+ rnd := newRnd(0)
values := make([][]byte, keyCount)
keys := make([][]byte, keyCount)
@@ -1125,7 +1125,7 @@ func generateInputData(tb testing.TB, keySize, valueSize, keyCount int) ([][]byt
require.NoError(tb, err)
keys[i] = common.Copy(bk[:n])
- n, err = rnd.Read(bv[:rnd.Intn(valueSize)+1])
+ n, err = rnd.Read(bv[:rnd.IntN(valueSize)+1])
require.NoError(tb, err)
values[i] = common.Copy(bv[:n])
diff --git a/erigon-lib/state/bps_tree.go b/erigon-lib/state/bps_tree.go
index d039f8ef5dd..188d7723590 100644
--- a/erigon-lib/state/bps_tree.go
+++ b/erigon-lib/state/bps_tree.go
@@ -25,14 +25,13 @@ import (
"time"
"unsafe"
- "github.com/erigontech/erigon-lib/common/dbg"
-
"github.com/c2h5oh/datasize"
- "github.com/erigontech/erigon-lib/seg"
"github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/common/dbg"
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon-lib/recsplit/eliasfano32"
+ "github.com/erigontech/erigon-lib/seg"
)
// nolint
diff --git a/erigon-lib/state/cache.go b/erigon-lib/state/cache.go
index af76f2cd495..0c16e2c41e6 100644
--- a/erigon-lib/state/cache.go
+++ b/erigon-lib/state/cache.go
@@ -30,7 +30,7 @@ type domainGetFromFileCacheItem struct {
}
var (
- domainGetFromFileCacheLimit = uint32(dbg.EnvInt("D_LRU", 10_000))
+ domainGetFromFileCacheLimit = uint32(dbg.EnvInt("D_LRU", 1_000))
domainGetFromFileCacheTrace = dbg.EnvBool("D_LRU_TRACE", false)
domainGetFromFileCacheEnabled = dbg.EnvBool("D_LRU_ENABLED", true)
)
diff --git a/erigon-lib/state/domain_shared_bench_test.go b/erigon-lib/state/domain_shared_bench_test.go
index 927255bbaba..2c8700e1734 100644
--- a/erigon-lib/state/domain_shared_bench_test.go
+++ b/erigon-lib/state/domain_shared_bench_test.go
@@ -19,7 +19,6 @@ package state
import (
"context"
"encoding/binary"
- "math/rand"
"testing"
"github.com/stretchr/testify/require"
@@ -46,8 +45,7 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) {
defer domains.Close()
maxTx := stepSize * 258
- seed := int64(4500)
- rnd := rand.New(rand.NewSource(seed))
+ rnd := newRnd(4500)
keys := make([][]byte, 8)
for i := 0; i < len(keys); i++ {
@@ -104,7 +102,7 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) {
for ik := 0; ik < t.N; ik++ {
for i := 0; i < len(keys); i++ {
- ts := uint64(rnd.Intn(int(maxTx)))
+ ts := uint64(rnd.IntN(int(maxTx)))
v, ok, err := ac2.HistorySeek(kv.AccountsHistory, keys[i], ts, rwTx)
require.True(t, ok)
diff --git a/erigon-lib/state/domain_shared_test.go b/erigon-lib/state/domain_shared_test.go
index 583c72f860b..2ca7088f7ee 100644
--- a/erigon-lib/state/domain_shared_test.go
+++ b/erigon-lib/state/domain_shared_test.go
@@ -20,7 +20,6 @@ import (
"context"
"encoding/binary"
"fmt"
- "math/rand"
"testing"
"time"
@@ -53,7 +52,7 @@ func TestSharedDomain_CommitmentKeyReplacement(t *testing.T) {
require.NoError(t, err)
defer domains.Close()
- rnd := rand.New(rand.NewSource(2342))
+ rnd := newRnd(2342)
maxTx := stepSize * 8
// 1. generate data
@@ -134,7 +133,7 @@ func TestSharedDomain_Unwind(t *testing.T) {
maxTx := stepSize
hashes := make([][]byte, maxTx)
count := 10
- rnd := rand.New(rand.NewSource(0))
+ rnd := newRnd(0)
ac.Close()
err = rwTx.Commit()
require.NoError(t, err)
@@ -180,7 +179,7 @@ Loop:
err = domains.Flush(ctx, rwTx)
require.NoError(t, err)
- unwindTo := uint64(commitStep * rnd.Intn(int(maxTx)/commitStep))
+ unwindTo := uint64(commitStep * rnd.IntN(int(maxTx)/commitStep))
domains.currentChangesAccumulator = nil
acu := agg.BeginFilesRo()
diff --git a/erigon-lib/state/domain_test.go b/erigon-lib/state/domain_test.go
index 53285aaf022..8ef5eebaf0c 100644
--- a/erigon-lib/state/domain_test.go
+++ b/erigon-lib/state/domain_test.go
@@ -24,7 +24,8 @@ import (
"fmt"
"io/fs"
"math"
- "math/rand"
+ randOld "math/rand"
+ "math/rand/v2"
"os"
"path/filepath"
"sort"
@@ -51,6 +52,20 @@ import (
"github.com/erigontech/erigon-lib/types"
)
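+// rndGen pairs a math/rand/v2 ChaCha8 generator (for IntN) with a legacy
+// math/rand source that still provides Read; note only the low byte of seed
+// is used for the ChaCha8 state, which is enough for these fixed test seeds.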
+type rndGen struct {
+ *rand.Rand
+ oldGen *randOld.Rand
+}
+
+func newRnd(seed uint64) *rndGen {
+ return &rndGen{
+ Rand: rand.New(rand.NewChaCha8([32]byte{byte(seed)})),
+ oldGen: randOld.New(randOld.NewSource(int64(seed))),
+ }
+}
+func (r *rndGen) IntN(n int) int { return int(r.Uint64N(uint64(n))) }
+func (r *rndGen) Read(p []byte) (n int, err error) { return r.oldGen.Read(p) } // math/rand/v2 generators have no Read method as of go1.22, so fall back to the legacy source
+
func testDbAndDomain(t *testing.T, logger log.Logger) (kv.RwDB, *Domain) {
t.Helper()
return testDbAndDomainOfStep(t, 16, logger)
@@ -1309,10 +1324,7 @@ func generateTestDataForDomainCommitment(tb testing.TB, keySize1, keySize2, tota
tb.Helper()
doms := make(map[string]map[string][]upd)
- seed := 31
- //seed := time.Now().Unix()
- defer tb.Logf("generated data with seed %d, keys %d", seed, keyLimit)
- r := rand.New(rand.NewSource(0))
+ r := newRnd(31)
accs := make(map[string][]upd)
stor := make(map[string][]upd)
@@ -1340,11 +1352,7 @@ func generateTestData(tb testing.TB, keySize1, keySize2, totalTx, keyTxsLimit, k
tb.Helper()
data := make(map[string][]upd)
- //seed := time.Now().Unix()
- seed := 31
- defer tb.Logf("generated data with seed %d, keys %d", seed, keyLimit)
-
- r := rand.New(rand.NewSource(0))
+ r := newRnd(31)
if keyLimit == 1 {
key1 := generateRandomKey(r, keySize1)
data[key1] = generateUpdates(r, totalTx, keyTxsLimit)
@@ -1360,24 +1368,24 @@ func generateTestData(tb testing.TB, keySize1, keySize2, totalTx, keyTxsLimit, k
return data
}
-func generateRandomKey(r *rand.Rand, size uint64) string {
+func generateRandomKey(r *rndGen, size uint64) string {
return string(generateRandomKeyBytes(r, size))
}
-func generateRandomKeyBytes(r *rand.Rand, size uint64) []byte {
+func generateRandomKeyBytes(r *rndGen, size uint64) []byte {
key := make([]byte, size)
r.Read(key)
return key
}
-func generateAccountUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd {
+func generateAccountUpdates(r *rndGen, totalTx, keyTxsLimit uint64) []upd {
updates := make([]upd, 0)
usedTxNums := make(map[uint64]bool)
for i := uint64(0); i < keyTxsLimit; i++ {
txNum := generateRandomTxNum(r, totalTx, usedTxNums)
- jitter := r.Intn(10e7)
+ jitter := r.IntN(10e7)
value := types.EncodeAccountBytesV3(i, uint256.NewInt(i*10e4+uint64(jitter)), nil, 0)
updates = append(updates, upd{txNum: txNum, value: value})
@@ -1388,7 +1396,7 @@ func generateAccountUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd {
return updates
}
-func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize uint64) []upd {
+func generateArbitraryValueUpdates(r *rndGen, totalTx, keyTxsLimit, maxSize uint64) []upd {
updates := make([]upd, 0)
usedTxNums := make(map[uint64]bool)
//maxStorageSize := 24 * (1 << 10) // limit on contract code
@@ -1396,7 +1404,7 @@ func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize u
for i := uint64(0); i < keyTxsLimit; i++ {
txNum := generateRandomTxNum(r, totalTx, usedTxNums)
- value := make([]byte, r.Intn(int(maxSize)))
+ value := make([]byte, r.IntN(int(maxSize)))
r.Read(value)
updates = append(updates, upd{txNum: txNum, value: value})
@@ -1407,7 +1415,7 @@ func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize u
return updates
}
-func generateUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd {
+func generateUpdates(r *rndGen, totalTx, keyTxsLimit uint64) []upd {
updates := make([]upd, 0)
usedTxNums := make(map[uint64]bool)
@@ -1424,10 +1432,10 @@ func generateUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd {
return updates
}
-func generateRandomTxNum(r *rand.Rand, maxTxNum uint64, usedTxNums map[uint64]bool) uint64 {
- txNum := uint64(r.Intn(int(maxTxNum)))
+func generateRandomTxNum(r *rndGen, maxTxNum uint64, usedTxNums map[uint64]bool) uint64 {
+ txNum := uint64(r.IntN(int(maxTxNum)))
for usedTxNums[txNum] {
- txNum = uint64(r.Intn(int(maxTxNum)))
+ txNum = uint64(r.IntN(int(maxTxNum)))
}
return txNum
@@ -1508,8 +1516,6 @@ func TestDomain_CanPruneAfterAggregation(t *testing.T) {
aggStep := uint64(25)
db, d := testDbAndDomainOfStep(t, aggStep, log.New())
- defer db.Close()
- defer d.Close()
tx, err := db.BeginRw(context.Background())
require.NoError(t, err)
diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go
index fab8c6dd9ea..9146e971b20 100644
--- a/erigon-lib/state/inverted_index.go
+++ b/erigon-lib/state/inverted_index.go
@@ -1323,9 +1323,9 @@ func (it *InvertedIterator1) advanceInFiles() {
}
if !bytes.Equal(key, it.key) {
ef, _ := eliasfano32.ReadEliasFano(val)
- min := ef.Get(0)
- max := ef.Max()
- if min < it.endTxNum && max >= it.startTxNum { // Intersection of [min; max) and [it.startTxNum; it.endTxNum)
+ _min := ef.Get(0)
+ _max := ef.Max()
+ if _min < it.endTxNum && _max >= it.startTxNum { // Intersection of [min; max) and [it.startTxNum; it.endTxNum)
it.key = key
it.nextFileKey = key
return
diff --git a/erigon-lib/tools/golangci_lint.sh b/erigon-lib/tools/golangci_lint.sh
index ada4234150d..4c812bc72b9 100755
--- a/erigon-lib/tools/golangci_lint.sh
+++ b/erigon-lib/tools/golangci_lint.sh
@@ -2,7 +2,7 @@
scriptDir=$(dirname "${BASH_SOURCE[0]}")
scriptName=$(basename "${BASH_SOURCE[0]}")
-version="v1.59.1"
+version="v1.60.0"
if [[ "$1" == "--install-deps" ]]
then
diff --git a/erigon-lib/types/ssz/ssz.go b/erigon-lib/types/ssz/ssz.go
index 60800543ae1..40d5ad3a19a 100644
--- a/erigon-lib/types/ssz/ssz.go
+++ b/erigon-lib/types/ssz/ssz.go
@@ -85,7 +85,7 @@ func UnmarshalUint64SSZ(x []byte) uint64 {
return binary.LittleEndian.Uint64(x)
}
-func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint64, version int) ([]T, error) {
+func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, _max uint64, version int) ([]T, error) {
if start > end || len(bytes) < int(end) {
return nil, ErrBadOffset
}
@@ -96,7 +96,7 @@ func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint6
elementsNum = currentOffset / 4
}
inPos := 4
- if uint64(elementsNum) > max {
+ if uint64(elementsNum) > _max {
return nil, ErrTooBigList
}
objs := make([]T, elementsNum)
@@ -121,7 +121,7 @@ func DecodeDynamicList[T Unmarshaler](bytes []byte, start, end uint32, max uint6
return objs, nil
}
-func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement uint32, max uint64, version int) ([]T, error) {
+func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement uint32, _max uint64, version int) ([]T, error) {
if start > end || len(bytes) < int(end) {
return nil, ErrBadOffset
}
@@ -131,7 +131,7 @@ func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement u
if uint32(len(buf))%bytesPerElement != 0 {
return nil, ErrBufferNotRounded
}
- if elementsNum > max {
+ if elementsNum > _max {
return nil, ErrTooBigList
}
objs := make([]T, elementsNum)
@@ -144,7 +144,7 @@ func DecodeStaticList[T Unmarshaler](bytes []byte, start, end, bytesPerElement u
return objs, nil
}
-func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error) {
+func DecodeHashList(bytes []byte, start, end, _max uint32) ([]common.Hash, error) {
if start > end || len(bytes) < int(end) {
return nil, ErrBadOffset
}
@@ -154,7 +154,7 @@ func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error)
if uint32(len(buf))%length.Hash != 0 {
return nil, ErrBufferNotRounded
}
- if elementsNum > max {
+ if elementsNum > _max {
return nil, ErrTooBigList
}
objs := make([]common.Hash, elementsNum)
@@ -164,7 +164,7 @@ func DecodeHashList(bytes []byte, start, end, max uint32) ([]common.Hash, error)
return objs, nil
}
-func DecodeNumbersList(bytes []byte, start, end uint32, max uint64) ([]uint64, error) {
+func DecodeNumbersList(bytes []byte, start, end uint32, _max uint64) ([]uint64, error) {
if start > end || len(bytes) < int(end) {
return nil, ErrBadOffset
}
@@ -174,7 +174,7 @@ func DecodeNumbersList(bytes []byte, start, end uint32, max uint64) ([]uint64, e
if uint64(len(buf))%length.BlockNum != 0 {
return nil, ErrBufferNotRounded
}
- if elementsNum > max {
+ if elementsNum > _max {
return nil, ErrTooBigList
}
objs := make([]uint64, elementsNum)
@@ -195,12 +195,12 @@ func CalculateIndiciesLimit(maxCapacity, numItems, size uint64) uint64 {
return numItems
}
-func DecodeString(bytes []byte, start, end, max uint64) ([]byte, error) {
+func DecodeString(bytes []byte, start, end, _max uint64) ([]byte, error) {
if start > end || len(bytes) < int(end) {
return nil, ErrBadOffset
}
buf := bytes[start:end]
- if uint64(len(buf)) > max {
+ if uint64(len(buf)) > _max {
return nil, ErrTooBigList
}
return buf, nil
diff --git a/eth/stagedsync/exec3_parallel.go b/eth/stagedsync/exec3_parallel.go
index c65c83c863c..79aa06c6bcb 100644
--- a/eth/stagedsync/exec3_parallel.go
+++ b/eth/stagedsync/exec3_parallel.go
@@ -4,11 +4,12 @@ import (
"context"
"errors"
"fmt"
- chaos_monkey "github.com/erigontech/erigon/tests/chaos-monkey"
"sync"
"sync/atomic"
"time"
+ chaos_monkey "github.com/erigontech/erigon/tests/chaos-monkey"
+
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/log/v3"
@@ -335,7 +336,7 @@ func (pe *parallelExecutor) rwLoop(ctx context.Context, maxTxNum uint64, logger
defer tx.Rollback()
pe.doms.SetTx(tx)
- applyCtx, cancelApplyCtx = context.WithCancel(ctx)
+ applyCtx, cancelApplyCtx = context.WithCancel(ctx) //nolint:fatcontext
defer cancelApplyCtx()
pe.applyLoopWg.Add(1)
go pe.applyLoop(applyCtx, maxTxNum, &blockComplete, pe.rwLoopErrCh)
diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go
index d4a12250055..f8e056623e9 100644
--- a/eth/stagedsync/stage_snapshots.go
+++ b/eth/stagedsync/stage_snapshots.go
@@ -758,22 +758,22 @@ func (u *snapshotUploader) init(ctx context.Context, logger log.Logger) {
}
func (u *snapshotUploader) maxUploadedHeader() uint64 {
- var max uint64
+ var _max uint64
if len(u.files) > 0 {
for _, state := range u.files {
if state.local && state.remote {
if state.info != nil {
if state.info.Type.Enum() == coresnaptype.Enums.Headers {
- if state.info.To > max {
- max = state.info.To
+ if state.info.To > _max {
+ _max = state.info.To
}
}
} else {
if info, _, ok := snaptype.ParseFileName(u.cfg.dirs.Snap, state.file); ok {
if info.Type.Enum() == coresnaptype.Enums.Headers {
- if info.To > max {
- max = info.To
+ if info.To > _max {
+ _max = info.To
}
}
state.info = &info
@@ -783,7 +783,7 @@ func (u *snapshotUploader) maxUploadedHeader() uint64 {
}
}
- return max
+ return _max
}
type dirEntry struct {
@@ -1040,25 +1040,25 @@ func (u *snapshotUploader) downloadLatestSnapshots(ctx context.Context, blockNum
}
}
- var min uint64
+ var _min uint64
for _, info := range lastSegments {
if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok {
- if min == 0 || lastInfo.From() < min {
- min = lastInfo.From()
+ if _min == 0 || lastInfo.From() < _min {
+ _min = lastInfo.From()
}
}
}
for segType, info := range lastSegments {
if lastInfo, ok := info.Sys().(downloader.SnapInfo); ok {
- if lastInfo.From() > min {
+ if lastInfo.From() > _min {
for _, ent := range entries {
if info, err := ent.Info(); err == nil {
snapInfo, ok := info.Sys().(downloader.SnapInfo)
if ok && snapInfo.Type().Enum() == segType &&
- snapInfo.From() == min {
+ snapInfo.From() == _min {
lastSegments[segType] = info
}
}
@@ -1088,17 +1088,17 @@ func (u *snapshotUploader) maxSeedableHeader() uint64 {
}
func (u *snapshotUploader) minBlockNumber() uint64 {
- var min uint64
+ var _min uint64
if list, err := snaptype.Segments(u.cfg.dirs.Snap); err == nil {
for _, info := range list {
- if u.seedable(info) && min == 0 || info.From < min {
- min = info.From
+ if u.seedable(info) && _min == 0 || info.From < _min {
+ _min = info.From
}
}
}
- return min
+ return _min
}
func expandHomeDir(dirpath string) string {
diff --git a/go.mod b/go.mod
index ff79c4af55e..be8789ea804 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,6 @@ require (
github.com/Giulio2002/bls v0.0.0-20241013174947-019133587795
github.com/Masterminds/sprig/v3 v3.2.3
github.com/RoaringBitmap/roaring v1.9.4
- github.com/alecthomas/atomic v0.1.0-alpha2
github.com/alecthomas/kong v0.8.1
github.com/anacrolix/sync v0.5.1
github.com/anacrolix/torrent v1.52.6-0.20231201115409-7ea994b6bbd8
@@ -42,7 +41,7 @@ require (
github.com/go-test/deep v1.1.1
github.com/goccy/go-json v0.9.11
github.com/gofrs/flock v0.12.1
- github.com/golang-jwt/jwt/v4 v4.5.0
+ github.com/golang-jwt/jwt/v4 v4.5.1
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/btree v1.1.3
github.com/google/cel-go v0.18.2
@@ -107,6 +106,7 @@ require (
)
require (
+ github.com/alecthomas/atomic v0.1.0-alpha2 // indirect
github.com/elastic/go-freelru v0.13.0 // indirect
github.com/erigontech/speedtest v0.0.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
diff --git a/go.sum b/go.sum
index 6661c7a2b0c..021e462064e 100644
--- a/go.sum
+++ b/go.sum
@@ -354,8 +354,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index 3fb9a240d31..330fd7ac484 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -745,8 +745,8 @@ func contains(ns []*node, id enode.ID) bool {
}
// pushNode adds n to the front of list, keeping at most max items.
-func pushNode(list []*node, n *node, max int) ([]*node, *node) {
- if len(list) < max {
+func pushNode(list []*node, n *node, _max int) ([]*node, *node) {
+ if len(list) < _max {
list = append(list, nil)
}
removed := list[len(list)-1]
diff --git a/p2p/netutil/iptrack.go b/p2p/netutil/iptrack.go
index c902bf97dbc..36b3a1df5b9 100644
--- a/p2p/netutil/iptrack.go
+++ b/p2p/netutil/iptrack.go
@@ -82,15 +82,15 @@ func (it *IPTracker) PredictEndpoint() string {
// The current strategy is simple: find the endpoint with most statements.
counts := make(map[string]int)
- maxcount, max := 0, ""
+ maxcount, _max := 0, ""
for _, s := range it.statements {
c := counts[s.endpoint] + 1
counts[s.endpoint] = c
if c > maxcount && c >= it.minStatements {
- maxcount, max = c, s.endpoint
+ maxcount, _max = c, s.endpoint
}
}
- return max
+ return _max
}
// AddStatement records that a certain host thinks our external endpoint is the one given.
diff --git a/polygon/bor/bordb/prune.go b/polygon/bor/bordb/prune.go
index b088089cff6..219afe108c8 100644
--- a/polygon/bor/bordb/prune.go
+++ b/polygon/bor/bordb/prune.go
@@ -103,7 +103,7 @@ func UnwindHeimdall(ctx context.Context, heimdallStore heimdall.Store, bridgeSto
}
}
- if !unwindCfg.KeepEventProcessedBlocks {
+ if !unwindCfg.KeepEventProcessedBlocks && unwindCfg.Astrid {
if err := bridge.UnwindEventProcessedBlocks(tx, unwindPoint); err != nil {
return err
}
diff --git a/polygon/bor/valset/validator_set.go b/polygon/bor/valset/validator_set.go
index 6599c9f8f6c..1cc5e9e7c29 100644
--- a/polygon/bor/valset/validator_set.go
+++ b/polygon/bor/valset/validator_set.go
@@ -190,20 +190,20 @@ func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 {
panic("empty validator set")
}
- max := int64(math.MinInt64)
- min := int64(math.MaxInt64)
+ _max := int64(math.MinInt64)
+ _min := int64(math.MaxInt64)
for _, v := range vals.Validators {
- if v.ProposerPriority < min {
- min = v.ProposerPriority
+ if v.ProposerPriority < _min {
+ _min = v.ProposerPriority
}
- if v.ProposerPriority > max {
- max = v.ProposerPriority
+ if v.ProposerPriority > _max {
+ _max = v.ProposerPriority
}
}
- diff := max - min
+ diff := _max - _min
if diff < 0 {
return -1 * diff
diff --git a/polygon/heimdall/client.go b/polygon/heimdall/client.go
index c39062473cd..5cffd3b1997 100644
--- a/polygon/heimdall/client.go
+++ b/polygon/heimdall/client.go
@@ -486,9 +486,6 @@ func FetchWithRetryEx[T any](
) (result *T, err error) {
attempt := 0
// create a new ticker for retrying the request
- if client.retryBackOff < apiHeimdallTimeout {
- client.retryBackOff = apiHeimdallTimeout + time.Second*2
- }
ticker := time.NewTicker(client.retryBackOff)
defer ticker.Stop()
diff --git a/tests/difficulty_test.go b/tests/difficulty_test.go
index daab9e53e8e..c301d94ff70 100644
--- a/tests/difficulty_test.go
+++ b/tests/difficulty_test.go
@@ -28,8 +28,6 @@ import (
)
func TestDifficulty(t *testing.T) {
- //t.Parallel()
-
dt := new(testMatcher)
dt.walk(t, difficultyTestDir, func(t *testing.T, name string, superTest map[string]json.RawMessage) {
diff --git a/tests/fuzzers/difficulty/difficulty-fuzz.go b/tests/fuzzers/difficulty/difficulty-fuzz.go
index 360d8581bd6..9e7b82d96b5 100644
--- a/tests/fuzzers/difficulty/difficulty-fuzz.go
+++ b/tests/fuzzers/difficulty/difficulty-fuzz.go
@@ -45,11 +45,11 @@ func (f *fuzzer) read(size int) []byte {
return out
}
-func (f *fuzzer) readSlice(min, max int) []byte {
+func (f *fuzzer) readSlice(_min, _max int) []byte {
var a uint16
//nolint:errcheck
binary.Read(f.input, binary.LittleEndian, &a)
- size := min + int(a)%(max-min)
+ size := _min + int(a)%(_max-_min)
out := make([]byte, size)
if _, err := f.input.Read(out); err != nil {
f.exhausted = true
@@ -57,15 +57,15 @@ func (f *fuzzer) readSlice(min, max int) []byte {
return out
}
-func (f *fuzzer) readUint64(min, max uint64) uint64 {
- if min == max {
- return min
+func (f *fuzzer) readUint64(_min, _max uint64) uint64 {
+ if _min == _max {
+ return _min
}
var a uint64
if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil {
f.exhausted = true
}
- a = min + a%(max-min)
+ a = _min + a%(_max-_min)
return a
}
func (f *fuzzer) readBool() bool {
diff --git a/tests/init_test.go b/tests/init_test.go
index a3a28f110f1..27bbcda2ea1 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -228,7 +228,6 @@ func (tm *testMatcher) runTestFile(t *testing.T, path, name string, runTest inte
t.Skip("Skipped by whitelist")
}
}
- //t.Parallel()
// Load the file as map[string].
m := makeMapFromTestFunc(runTest)
@@ -289,7 +288,6 @@ func runTestFunc(runTest interface{}, t *testing.T, name string, m reflect.Value
}
func TestMatcherWhitelist(t *testing.T) {
- //t.Parallel()
tm := new(testMatcher)
tm.whitelist("invalid*")
tm.walk(t, rlpTestDir, func(t *testing.T, name string, test *RLPTest) {
diff --git a/tests/rlp_test.go b/tests/rlp_test.go
index f6a907b2ade..25abe33f7e6 100644
--- a/tests/rlp_test.go
+++ b/tests/rlp_test.go
@@ -26,7 +26,6 @@ import (
)
func TestRLP(t *testing.T) {
- //t.Parallel()
tm := new(testMatcher)
tm.walk(t, rlpTestDir, func(t *testing.T, name string, test *RLPTest) {
if err := tm.checkFailure(t, test.Run()); err != nil {
diff --git a/tests/state_test.go b/tests/state_test.go
index 7a5f9b93ddb..9b308a99a57 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -44,7 +44,6 @@ func TestState(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("fix me on win please") // it's too slow on win and stops on macos, need generally improve speed of this tests
}
- //t.Parallel()
st := new(testMatcher)
diff --git a/tests/transaction_test.go b/tests/transaction_test.go
index 1b3ffd32837..af2b25d0a7b 100644
--- a/tests/transaction_test.go
+++ b/tests/transaction_test.go
@@ -28,8 +28,6 @@ import (
)
func TestTransaction(t *testing.T) {
- //t.Parallel()
-
txt := new(testMatcher)
// We don't allow more than uint64 in gas amount
@@ -38,6 +36,7 @@ func TestTransaction(t *testing.T) {
txt.skipLoad("^ttGasLimit/TransactionWithGasLimitxPriceOverflow.json")
txt.walk(t, transactionTestDir, func(t *testing.T, name string, test *TransactionTest) {
+ t.Parallel()
cfg := params.MainnetChainConfig
if err := txt.checkFailure(t, test.Run(cfg.ChainID)); err != nil {
t.Error(err)
diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go
index d932b50ccf9..78e96f62f5e 100644
--- a/turbo/jsonrpc/debug_api.go
+++ b/turbo/jsonrpc/debug_api.go
@@ -21,14 +21,13 @@ import (
"errors"
"fmt"
- jsoniter "github.com/json-iterator/go"
-
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/hexutil"
"github.com/erigontech/erigon-lib/common/hexutility"
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/kv/order"
"github.com/erigontech/erigon-lib/kv/rawdbv3"
+ jsoniter "github.com/json-iterator/go"
"github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/types/accounts"
@@ -205,48 +204,40 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByNumber(ctx context.Context,
if startNum > endNum {
return nil, fmt.Errorf("start block (%d) must be less than or equal to end block (%d)", startNum, endNum)
}
-
//[from, to)
startTxNum, err := txNumsReader.Min(tx, startNum)
if err != nil {
return nil, err
}
- endTxNum, err := txNumsReader.Max(tx, endNum-1)
+ endTxNum, err := txNumsReader.Min(tx, endNum)
if err != nil {
return nil, err
}
- return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum)
+ return getModifiedAccounts(tx.(kv.TemporalTx), startTxNum, endTxNum)
}
-// getModifiedAccountsV3 returns a list of addresses that were modified in the block range
+// getModifiedAccounts returns a list of addresses that were modified in the block range
// [startNum:endNum)
-func getModifiedAccountsV3(tx kv.TemporalTx, startTxNum, endTxNum uint64) ([]common.Address, error) {
+func getModifiedAccounts(tx kv.TemporalTx, startTxNum, endTxNum uint64) ([]common.Address, error) {
it, err := tx.HistoryRange(kv.AccountsHistory, int(startTxNum), int(endTxNum), order.Asc, kv.Unlim)
if err != nil {
return nil, err
}
defer it.Close()
- changedAddrs := make(map[common.Address]struct{})
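+	// collect unique addresses in iteration order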
+ var result []common.Address
+ saw := make(map[common.Address]struct{})
for it.HasNext() {
k, _, err := it.Next()
if err != nil {
return nil, err
}
- changedAddrs[common.BytesToAddress(k)] = struct{}{}
- }
-
- if len(changedAddrs) == 0 {
- return nil, nil
- }
-
- idx := 0
- result := make([]common.Address, len(changedAddrs))
- for addr := range changedAddrs {
- copy(result[idx][:], addr[:])
- idx++
+ //TODO: data is sorted, enough to compare with prevKey
+		addr := common.BytesToAddress(k)
+		if _, ok := saw[addr]; !ok {
+			saw[addr] = struct{}{}
+			result = append(result, addr)
+		}
}
-
return result, nil
}
@@ -294,7 +285,7 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByHash(ctx context.Context, s
if err != nil {
return nil, err
}
- return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum)
+ return getModifiedAccounts(tx.(kv.TemporalTx), startTxNum, endTxNum)
}
func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, address common.Address) (*AccountResult, error) {
diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go
index 3dbb7b5b69a..ba23694e56a 100644
--- a/turbo/jsonrpc/eth_receipts.go
+++ b/turbo/jsonrpc/eth_receipts.go
@@ -303,10 +303,6 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end
continue
}
blockHash = header.Hash()
-
- if err != nil {
- return nil, err
- }
exec.ChangeBlock(header)
}
diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go
index 744b6900d23..5b681954f2c 100644
--- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go
+++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go
@@ -146,7 +146,7 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64,
// this is one off code to fix an issue in 2.49.x->2.52.x which missed
// removal of intermediate segments after a merge operation
-func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) {
+func removeBorOverlaps(dir string, active []snaptype.FileInfo, _max uint64) {
list, err := snaptype.Segments(dir)
if err != nil {
@@ -165,12 +165,12 @@ func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) {
// added overhead to make sure we don't delete in the
// current 500k block segment
- if max > 500_001 {
- max -= 500_001
+ if _max > 500_001 {
+ _max -= 500_001
}
for _, f := range l {
- if max < f.From {
+ if _max < f.From {
continue
}
diff --git a/turbo/snapshotsync/snapshots.go b/turbo/snapshotsync/snapshots.go
index 3fc4d054cc3..da86afbc070 100644
--- a/turbo/snapshotsync/snapshots.go
+++ b/turbo/snapshotsync/snapshots.go
@@ -838,7 +838,7 @@ func (s *RoSnapshots) dirtyIdxAvailability(segtype snaptype.Enum) uint64 {
return 0
}
- var max uint64
+ var _max uint64
dirty.Walk(func(segments []*DirtySegment) bool {
for _, seg := range segments {
@@ -846,30 +846,30 @@ func (s *RoSnapshots) dirtyIdxAvailability(segtype snaptype.Enum) uint64 {
break
}
- max = seg.to - 1
+ _max = seg.to - 1
}
return true
})
- return max
+ return _max
}
func (s *RoSnapshots) visibleIdxAvailability(segtype snaptype.Enum) uint64 {
tx := s.ViewType(segtype.Type())
defer tx.Close()
- var max uint64
+ var _max uint64
for _, seg := range tx.Segments {
if !seg.IsIndexed() {
break
}
- max = seg.to - 1
+ _max = seg.to - 1
}
- return max
+ return _max
}
func (s *RoSnapshots) Ls() {