From 367caa33ef5ab618ea357189e88044dbdbd17776 Mon Sep 17 00:00:00 2001 From: Callum Waters Date: Thu, 21 Sep 2023 17:13:31 +0200 Subject: [PATCH] chore: bump to v0.34.29 of comet (#1087) Signed-off-by: dependabot[bot] Signed-off-by: Thane Thomson Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Sergio Mena Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jasmina Malicevic Co-authored-by: Lasaro Co-authored-by: Thane Thomson Co-authored-by: mmsqe Co-authored-by: yihuang Co-authored-by: Steven Ferrer Co-authored-by: Chill Validation <92176880+chillyvee@users.noreply.github.com> --- .../improvements/1210-close-evidence-db.md | 1 + .../857-make-handshake-cancelable.md | 1 + .../771-kvindexer-parsing-big-ints.md | 2 + .../bug-fixes/771-pubsub-parsing-big-ints.md | 4 + .../654-rpc-rm-response-data-logs.md | 3 + .../security-fixes/788-rpc-client-pw.md | 3 + .../794-cli-debug-kill-unsafe-cast.md | 2 + .../865-fix-peerstate-marshaljson.md | 3 + .../security-fixes/890-mempool-fix-cache.md | 3 + .changelog/v0.34.29/summary.md | 4 + .github/workflows/check-generated.yml | 2 +- .github/workflows/cometbft-docker.yml | 6 +- .github/workflows/coverage.yml | 8 +- .github/workflows/e2e-manual.yml | 2 +- .github/workflows/e2e-nightly-34x.yml | 6 +- .github/workflows/e2e.yml | 2 +- .github/workflows/fuzz-nightly.yml | 4 +- .github/workflows/lint.yml | 2 +- .github/workflows/markdown-linter.yml | 2 +- .github/workflows/pre-release.yml | 6 +- .github/workflows/proto-lint.yml | 4 +- .github/workflows/release-version.yml | 2 +- .github/workflows/release.yml | 4 +- .github/workflows/testapp-docker.yml | 8 +- .github/workflows/tests.yml | 6 +- CHANGELOG.md | 36 ++++ cmd/cometbft/commands/debug/kill.go | 6 +- cmd/cometbft/commands/root_test.go | 58 +++---- consensus/reactor.go | 3 +- consensus/reactor_test.go | 30 ++++ consensus/replay.go | 30 +++- consensus/state.go | 15 +- consensus/state_test.go | 159 ++++++++++++------ docs/app-dev/indexing-transactions.md | 11 ++ docs/core/subscription.md | 14 ++ libs/pubsub/query/query.go | 101 ++++++----- libs/pubsub/query/query_test.go | 76 ++++++++- mempool/v0/clist_mempool.go | 14 ++ mempool/v0/clist_mempool_test.go | 46 +++++ node/node.go | 11 +- rpc/jsonrpc/client/http_json_client.go | 6 + rpc/jsonrpc/server/http_json_handler.go | 6 +- rpc/jsonrpc/server/http_server.go | 4 +- rpc/jsonrpc/server/http_uri_handler.go | 8 +- rpc/openapi/openapi.yaml | 6 +- state/execution.go | 2 +- state/indexer/block/kv/kv.go | 15 +- state/indexer/block/kv/kv_test.go | 129 ++++++++++++++ state/indexer/block/kv/util.go | 9 +- state/indexer/query_range.go | 8 +- state/txindex/kv/kv.go | 17 +- state/txindex/kv/kv_test.go | 71 ++++++++ state/txindex/kv/utils.go | 7 +- version/version.go | 2 +- 54 files changed, 781 insertions(+), 209 deletions(-) create mode 100644 .changelog/unreleased/improvements/1210-close-evidence-db.md create mode 100644 .changelog/unreleased/improvements/857-make-handshake-cancelable.md create mode 100644 .changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md create mode 100644 .changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md create mode 100644 .changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md create mode 100644 .changelog/v0.34.29/security-fixes/788-rpc-client-pw.md create mode 100644 .changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md create mode 100644 .changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md 
create mode 100644 .changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md create mode 100644 .changelog/v0.34.29/summary.md diff --git a/.changelog/unreleased/improvements/1210-close-evidence-db.md b/.changelog/unreleased/improvements/1210-close-evidence-db.md new file mode 100644 index 0000000000..e32bc87dbe --- /dev/null +++ b/.changelog/unreleased/improvements/1210-close-evidence-db.md @@ -0,0 +1 @@ +- `[node]` Close evidence.db OnStop ([cometbft/cometbft\#1210](https://github.com/cometbft/cometbft/pull/1210): @chillyvee) diff --git a/.changelog/unreleased/improvements/857-make-handshake-cancelable.md b/.changelog/unreleased/improvements/857-make-handshake-cancelable.md new file mode 100644 index 0000000000..16b447f6d2 --- /dev/null +++ b/.changelog/unreleased/improvements/857-make-handshake-cancelable.md @@ -0,0 +1 @@ +- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) diff --git a/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md b/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md new file mode 100644 index 0000000000..4a0000db6d --- /dev/null +++ b/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md @@ -0,0 +1,2 @@ +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now + enabled. ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md b/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md new file mode 100644 index 0000000000..fc5f25a90f --- /dev/null +++ b/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md @@ -0,0 +1,4 @@ +- `[pubsub]` Pubsub queries are now able to parse big integers (larger than + int64). Very big floats are also properly parsed into very big integers + instead of being truncated to int64. 
+ ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md b/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md new file mode 100644 index 0000000000..3fddfee8e7 --- /dev/null +++ b/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md @@ -0,0 +1,3 @@ +- `[rpc]` Remove response data from response failure logs in order + to prevent large quantities of log data from being produced + ([\#654](https://github.com/cometbft/cometbft/issues/654)) \ No newline at end of file diff --git a/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md b/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md new file mode 100644 index 0000000000..430b7b5ac4 --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md @@ -0,0 +1,3 @@ +- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC + client credentials from being inadvertently dumped to logs + ([\#788](https://github.com/cometbft/cometbft/pull/788)) diff --git a/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md b/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md new file mode 100644 index 0000000000..782eccd9d5 --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md @@ -0,0 +1,2 @@ +- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in + `debug kill` command ([\#794](https://github.com/cometbft/cometbft/pull/794)) diff --git a/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md b/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md new file mode 100644 index 0000000000..fdd9172c20 --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md @@ -0,0 +1,3 @@ +- `[consensus]` **Low severity** - Avoid recursive call after rename to + `(*PeerState).MarshalJSON` + ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git a/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md b/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md new file mode 100644 index 0000000000..bad30efc7a --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md @@ -0,0 +1,3 @@ +- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from + appearing twice in the mempool + ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) diff --git a/.changelog/v0.34.29/summary.md b/.changelog/v0.34.29/summary.md new file mode 100644 index 0000000000..7ecb273940 --- /dev/null +++ b/.changelog/v0.34.29/summary.md @@ -0,0 +1,4 @@ +*June 14, 2023* + +Provides several minor bug fixes, as well as fixes for several low-severity +security issues. 
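The two `#771` entries above are easier to picture with a small standalone Go sketch (illustrative only, not part of this patch) of the difference between fixed-width `int64` parsing and `math/big` parsing that these fixes rely on:

```go
package main

import (
	"fmt"
	"math/big"
	"strconv"
)

func main() {
	// 10000000000000000000 is larger than math.MaxInt64 (9223372036854775807),
	// so the old int64-based parsing fails with a range error.
	v := "10000000000000000000"
	if _, err := strconv.ParseInt(v, 10, 64); err != nil {
		fmt.Println("int64 parse fails:", err)
	}

	// The query and indexer code paths changed in this release use math/big
	// instead, which has no fixed width and accepts the same string.
	b, ok := new(big.Int).SetString(v, 10)
	fmt.Println("big.Int parse ok:", ok, "value:", b.String())
}
```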
diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index ba206e4964..8b6705b1fd 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -45,7 +45,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 1 # we need a .git directory to run git diff diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index f4cf3f6f10..e70c65469d 100644 --- a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -14,7 +14,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -40,10 +40,10 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v3.0.0 - name: Build but do not Publish to Docker Hub - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v5 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 8e009b138a..60fe9a6b84 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,7 +9,7 @@ jobs: split-test-files: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Create a file with all the pkgs run: go list ./... > pkgs.txt - name: Split pkgs into 4 files @@ -46,7 +46,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -68,7 +68,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -92,7 +92,7 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index bca2861687..5b93752694 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -18,7 +18,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 4845e97c94..dfa9b88526 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: 'v0.34.x-celestia' @@ -49,7 +49,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} SLACK_CHANNEL: cometbft-engineering @@ -65,7 +65,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on success - uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} SLACK_CHANNEL: cometbft-engineering diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 2cb134aa90..f265593c8b 100644 --- a/.github/workflows/e2e.yml 
+++ b/.github/workflows/e2e.yml @@ -15,7 +15,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index c2fcda24c7..7e78fd0182 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -13,7 +13,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install go-fuzz working-directory: test/fuzz @@ -72,7 +72,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack if any crashers - uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} SLACK_CHANNEL: cometbft-engineering diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 33c46137d2..2a69042d98 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: '1.19' diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-linter.yml index bdbd7f2c33..6eeda3bc05 100644 --- a/.github/workflows/markdown-linter.yml +++ b/.github/workflows/markdown-linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Lint Code Base uses: docker://github/super-linter:v4 env: diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 2a9872bb9d..a7db3aae6c 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -44,7 +44,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for changes available in this pre-release, but not yet officially released." 
> ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: version: latest args: release --clean --release-notes ../release_notes.md @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon pre-release - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index 7579d8efb5..8ca233b00e 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -14,8 +14,8 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 5 steps: - - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.17.0 + - uses: actions/checkout@v4 + - uses: bufbuild/buf-setup-action@v1.26.1 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release-version.yml b/.github/workflows/release-version.yml index 034d191f21..c7c977e4a3 100644 --- a/.github/workflows/release-version.yml +++ b/.github/workflows/release-version.yml @@ -11,7 +11,7 @@ jobs: check-version: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f3a9dd4e93..da1fe53d40 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -26,7 +26,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for this release." > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: version: latest args: release --clean --release-notes ../release_notes.md diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml index 4a7efb141e..e9f84e525a 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -16,7 +16,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -42,17 +42,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v3.0.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v3.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v5.0.0 with: context: . 
file: ./test/e2e/docker/Dockerfile diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 64bb85e744..38e5caa668 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -58,7 +58,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -90,7 +90,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/CHANGELOG.md b/CHANGELOG.md index b89bd25153..7ecaaaa713 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,41 @@ # CHANGELOG +## v0.34.29 + +*June 14, 2023* + +Provides several minor bug fixes, as well as fixes for several low-severity +security issues. + +### BUG FIXES + +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now + enabled. ([\#771](https://github.com/cometbft/cometbft/pull/771)) +- `[pubsub]` Pubsub queries are now able to parse big integers (larger than + int64). Very big floats are also properly parsed into very big integers + instead of being truncated to int64. + ([\#771](https://github.com/cometbft/cometbft/pull/771)) + +### IMPROVEMENTS + +- `[rpc]` Remove response data from response failure logs in order + to prevent large quantities of log data from being produced + ([\#654](https://github.com/cometbft/cometbft/issues/654)) + +### SECURITY FIXES + +- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC + client credentials from being inadvertently dumped to logs + ([\#788](https://github.com/cometbft/cometbft/pull/788)) +- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in + `debug kill` command ([\#794](https://github.com/cometbft/cometbft/pull/794)) +- `[consensus]` **Low severity** - Avoid recursive call after rename to + `(*PeerState).MarshalJSON` + ([\#863](https://github.com/cometbft/cometbft/pull/863)) +- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from + appearing twice in the mempool + ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) + ## v0.34.28 *April 26, 2023* diff --git a/cmd/cometbft/commands/debug/kill.go b/cmd/cometbft/commands/debug/kill.go index 8e51e28993..8964bb9a13 100644 --- a/cmd/cometbft/commands/debug/kill.go +++ b/cmd/cometbft/commands/debug/kill.go @@ -33,7 +33,7 @@ $ cometbft debug 34255 /path/to/tm-debug.zip`, } func killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) + pid, err := strconv.Atoi(args[0]) if err != nil { return err } @@ -100,7 +100,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. -func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the CometBFT process to a file // // NOTE: This will only work on UNIX systems. 
@@ -123,7 +123,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the CometBFT process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. - p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill CometBFT process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { diff --git a/cmd/cometbft/commands/root_test.go b/cmd/cometbft/commands/root_test.go index 72604240ce..4c22b56516 100644 --- a/cmd/cometbft/commands/root_test.go +++ b/cmd/cometbft/commands/root_test.go @@ -17,28 +17,12 @@ import ( cmtos "github.com/tendermint/tendermint/libs/os" ) -var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") - // clearConfig clears env vars, the given root dir, and resets viper. -func clearConfig(dir string) { - if err := os.Unsetenv("CMTHOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("CMT_HOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("TMHOME"); err != nil { - //XXX: Deprecated. - panic(err) - } - if err := os.Unsetenv("TM_HOME"); err != nil { - //XXX: Deprecated. - panic(err) - } +func clearConfig(t *testing.T, dir string) { + os.Clearenv() + err := os.RemoveAll(dir) + require.NoError(t, err) - if err := os.RemoveAll(dir); err != nil { - panic(err) - } viper.Reset() config = cfg.DefaultConfig() } @@ -56,11 +40,11 @@ func testRootCmd() *cobra.Command { return rootCmd } -func testSetup(rootDir string, args []string, env map[string]string) error { - clearConfig(defaultRoot) +func testSetup(t *testing.T, root string, args []string, env map[string]string) error { + clearConfig(t, root) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env args = append([]string{rootCmd.Use}, args...) @@ -68,22 +52,27 @@ func testSetup(rootDir string, args []string, env map[string]string) error { } func TestRootHome(t *testing.T) { - newRoot := filepath.Join(defaultRoot, "something-else") + tmpDir := os.TempDir() + root := filepath.Join(tmpDir, "adir") + newRoot := filepath.Join(tmpDir, "something-else") + defer clearConfig(t, root) + defer clearConfig(t, newRoot) + cases := []struct { args []string env map[string]string root string }{ - {nil, nil, defaultRoot}, + {nil, nil, root}, {[]string{"--home", newRoot}, nil, newRoot}, {nil, map[string]string{"TMHOME": newRoot}, newRoot}, //XXX: Deprecated. 
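As an aside to the `debug kill` change above (a standalone sketch, not part of the patch): parsing the PID as `uint64` and then casting to `int` could silently wrap to a different value, whereas `strconv.Atoi` rejects out-of-range input up front.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	arg := "18446744073709551615" // fits in uint64 but not in int

	// Old approach: parse as uint64, later convert to int for os.FindProcess.
	pid, _ := strconv.ParseUint(arg, 10, 64)
	fmt.Println(int(pid)) // silently wraps to -1 on 64-bit platforms

	// New approach: Atoi fails instead of signalling the wrong process.
	if _, err := strconv.Atoi(arg); err != nil {
		fmt.Println("rejected:", err) // value out of range
	}
}
```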
{nil, map[string]string{"CMTHOME": newRoot}, newRoot}, } for i, tc := range cases { - idxString := strconv.Itoa(i) + idxString := "idx: " + strconv.Itoa(i) - err := testSetup(defaultRoot, tc.args, tc.env) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.root, config.RootDir, idxString) @@ -115,8 +104,10 @@ func TestRootFlagsEnv(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - - err := testSetup(defaultRoot, tc.args, tc.env) + root := filepath.Join(os.TempDir(), "adir2_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.logLevel, config.LogLevel, idxString) @@ -144,10 +135,11 @@ func TestRootConfig(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - clearConfig(defaultRoot) - + root := filepath.Join(os.TempDir(), "adir3_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) // XXX: path must match cfg.defaultConfigPath - configFilePath := filepath.Join(defaultRoot, "config") + configFilePath := filepath.Join(root, "config") err := cmtos.EnsureDir(configFilePath, 0o700) require.Nil(t, err) @@ -157,7 +149,7 @@ func TestRootConfig(t *testing.T) { require.Nil(t, err) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env tc.args = append([]string{rootCmd.Use}, tc.args...) diff --git a/consensus/reactor.go b/consensus/reactor.go index eb662c6af2..f0c1937c45 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -1099,7 +1099,8 @@ func (ps *PeerState) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return cmtjson.Marshal(ps) + type jsonPeerState PeerState + return cmtjson.Marshal((*jsonPeerState)(ps)) } // GetHeight returns an atomic snapshot of the PeerRoundState's height diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index b6134c7ce0..a82aa38859 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -2,6 +2,7 @@ package consensus import ( "context" + "encoding/json" "fmt" "os" "path" @@ -1043,3 +1044,32 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { }) } } + +func TestMarshalJSONPeerState(t *testing.T) { + ps := NewPeerState(nil) + data, err := json.Marshal(ps) + require.NoError(t, err) + require.JSONEq(t, `{ + "round_state":{ + "height": "0", + "round": -1, + "step": 0, + "start_time": "0001-01-01T00:00:00Z", + "proposal": false, + "proposal_block_part_set_header": + {"total":0, "hash":""}, + "proposal_block_parts": null, + "proposal_pol_round": -1, + "proposal_pol": null, + "prevotes": null, + "precommits": null, + "last_commit_round": -1, + "last_commit": null, + "catchup_commit_round": -1, + "catchup_commit": null + }, + "stats":{ + "votes":"0", + "block_parts":"0"} + }`, string(data)) +} diff --git a/consensus/replay.go b/consensus/replay.go index 4d82d331f3..586ddebf80 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "fmt" "hash/crc32" "io" @@ -239,6 +240,11 @@ func (h *Handshaker) NBlocks() int { // TODO: retry the handshake/replay if it fails ? 
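The `(*PeerState).MarshalJSON` fix above relies on a common Go pattern; here is a generic, self-contained illustration (not CometBFT code) of how converting to a locally defined type avoids infinite recursion:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Peer struct {
	ID    string `json:"id"`
	Votes int    `json:"votes"`
}

func (p *Peer) MarshalJSON() ([]byte, error) {
	// Calling json.Marshal(p) here would re-enter this method forever.
	// A defined type with the same fields has no methods, so the default
	// struct encoder is used instead.
	type peerJSON Peer
	return json.Marshal((*peerJSON)(p))
}

func main() {
	out, err := json.Marshal(&Peer{ID: "abc", Votes: 3})
	fmt.Println(string(out), err) // {"id":"abc","votes":3} <nil>
}
```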
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (string, error) { + return h.HandshakeWithContext(context.TODO(), proxyApp) +} + +// HandshakeWithContext is cancellable version of Handshake +func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) (string, error) { // Handshake is done via ABCI Info on the query conn. res, err := proxyApp.Query().InfoSync(proxy.RequestInfo) @@ -265,7 +271,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (string, error) { } // Replay blocks up to the latest in the blockstore. - _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + appHash, err = h.ReplayBlocksWithContext(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { return "", fmt.Errorf("error on replay: %v", err) } @@ -286,6 +292,17 @@ func (h *Handshaker) ReplayBlocks( appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns, +) ([]byte, error) { + return h.ReplayBlocksWithContext(context.TODO(), state, appHash, appBlockHeight, proxyApp) +} + +// ReplayBlocksWithContext is cancellable version of ReplayBlocks. +func (h *Handshaker) ReplayBlocksWithContext( + ctx context.Context, + state sm.State, + appHash []byte, + appBlockHeight int64, + proxyApp proxy.AppConns, ) ([]byte, error) { storeBlockBase := h.store.Base() storeBlockHeight := h.store.Height() @@ -390,7 +407,7 @@ func (h *Handshaker) ReplayBlocks( // Either the app is asking for replay, or we're all synced up. if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! @@ -405,7 +422,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true) case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), @@ -435,6 +452,7 @@ func (h *Handshaker) ReplayBlocks( } func (h *Handshaker) replayBlocks( + ctx context.Context, state sm.State, proxyApp proxy.AppConns, appBlockHeight, @@ -461,6 +479,12 @@ func (h *Handshaker) replayBlocks( firstBlock = state.InitialHeight } for i := firstBlock; i <= finalBlock; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) // Extra check to ensure the app was not changed in a way it shouldn't have. 
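The cancellable handshake added above threads a context into the replay loop and checks it between blocks. A minimal standalone sketch of that pattern (illustrative only; the real loop applies blocks via the proxy app):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// replay stands in for replayBlocks: it checks ctx.Done() at the top of each
// iteration so a long-running replay can be aborted by the caller.
func replay(ctx context.Context, first, last int64) error {
	for i := first; i <= last; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		time.Sleep(10 * time.Millisecond) // stand-in for applying block i
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(replay(ctx, 1, 100)) // context deadline exceeded
}
```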
diff --git a/consensus/state.go b/consensus/state.go index f637d750d9..bbbb1d3d44 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1008,7 +1008,7 @@ func (cs *State) enterNewRound(height int64, round int32) { logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) } - logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + prevHeight, prevRound, prevStep := cs.Height, cs.Round, cs.Step // increment validators if necessary validators := cs.Validators @@ -1022,17 +1022,23 @@ func (cs *State) enterNewRound(height int64, round int32) { // but we fire an event, so update the round step first cs.updateRoundStep(round, cstypes.RoundStepNewRound) cs.Validators = validators + propAddress := validators.GetProposer().PubKey.Address() if round == 0 { // We've already reset these upon new height, // and meanwhile we might have received a proposal // for round 0. } else { - logger.Debug("resetting proposal info") + logger.Info("resetting proposal info", "proposer", propAddress) cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil } + logger.Debug("entering new round", + "previous", log.NewLazySprintf("%v/%v/%v", prevHeight, prevRound, prevStep), + "proposer", propAddress, + ) + cs.Votes.SetRound(cmtmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping cs.TriggeredTimeoutPrecommit = false @@ -1863,7 +1869,8 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { p := proposal.ToProto() // Verify signature - if !cs.Validators.GetProposer().PubKey.VerifySignature( + pubKey := cs.Validators.GetProposer().PubKey + if !pubKey.VerifySignature( types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature, ) { return ErrInvalidProposalSignature @@ -1878,7 +1885,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } - cs.Logger.Info("received proposal", "proposal", proposal) + cs.Logger.Info("received proposal", "proposal", proposal, "proposer", pubKey.Address()) return nil } diff --git a/consensus/state_test.go b/consensus/state_test.go index 3110f67d50..5e3a76c545 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "strings" "testing" "time" @@ -240,61 +241,83 @@ func TestStateBadProposal(t *testing.T) { } func TestStateOversizedBlock(t *testing.T) { - cs1, vss := randState(2) - cs1.state.ConsensusParams.Block.MaxBytes = 2000 - height, round := cs1.Height, cs1.Round - vs2 := vss[1] - - partSize := types.BlockPartSizeBytes - - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - - propBlock, _ := cs1.createProposalBlock() - propBlock.Data.Txs = []types.Tx{cmtrand.Bytes(2001)} - propBlock.Header.DataHash = propBlock.Data.Hash() - - // make the second validator the proposer by incrementing round - round++ - incrementRound(vss[1:]...) 
- - propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, round, -1, blockID) - p := proposal.ToProto() - if err := vs2.SignProposal(config.ChainID(), p); err != nil { - t.Fatal("failed to sign bad proposal", err) + const maxBytes = 2000 + + for _, testCase := range []struct { + name string + oversized bool + }{ + { + name: "max size, correct block", + oversized: false, + }, + { + name: "off-by-1 max size, incorrect block", + oversized: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + cs1, vss := randState(2) + cs1.state.ConsensusParams.Block.MaxBytes = maxBytes + height, round := cs1.Height, cs1.Round + vs2 := vss[1] + + partSize := types.BlockPartSizeBytes + + propBlock, propBlockParts := findBlockSizeLimit(t, height, maxBytes, cs1, partSize, testCase.oversized) + + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + + // make the second validator the proposer by incrementing round + round++ + incrementRound(vss[1:]...) + + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal := types.NewProposal(height, round, -1, blockID) + p := proposal.ToProto() + if err := vs2.SignProposal(config.ChainID(), p); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + proposal.Signature = p.Signature + + totalBytes := 0 + for i := 0; i < int(propBlockParts.Total()); i++ { + part := propBlockParts.GetPart(i) + totalBytes += len(part.Bytes) + } + + if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + + // start the machine + startTestRound(cs1, height, round) + + t.Log("Block Sizes;", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) + + validateHash := propBlock.Hash() + lockedRound := int32(1) + if testCase.oversized { + validateHash = nil + lockedRound = -1 + // if the block is oversized cs1 should log an error with the block part message as it exceeds + // the consensus params. The block is not added to cs.ProposalBlock so the node timeouts. + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + // and then should send nil prevote and precommit regardless of whether other validators prevote and + // precommit on it + } + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], validateHash) + + signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevote(voteCh, height, round) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, lockedRound, vss[0], validateHash, validateHash) + + signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + }) } - proposal.Signature = p.Signature - - totalBytes := 0 - for i := 0; i < int(propBlockParts.Total()); i++ { - part := propBlockParts.GetPart(i) - totalBytes += len(part.Bytes) - } - - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } - - // start the machine - startTestRound(cs1, height, round) - - t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) - - // c1 should log an error with the block part message as it exceeds the consensus params. 
The - // block is not added to cs.ProposalBlock so the node timeouts. - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) - - // and then should send nil prevote and precommit regardless of whether other validators prevote and - // precommit on it - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } //---------------------------------------------------------------------------------------------------- @@ -1914,3 +1937,31 @@ func subscribeUnBuffered(eventBus *types.EventBus, q cmtpubsub.Query) <-chan cmt } return sub.Out() } + +func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSize uint32, oversized bool) (*types.Block, *types.PartSet) { + var offset int64 + if !oversized { + offset = -2 + } + softMaxDataBytes := int(types.MaxDataBytes(maxBytes, 0, 0)) + for i := softMaxDataBytes; i < softMaxDataBytes*2; i++ { + propBlock, propBlockParts := cs.state.MakeBlock( + height, + types.Data{Txs: []types.Tx{[]byte("a=" + strings.Repeat("o", i-2))}}, + &types.Commit{}, + nil, + cs.privValidatorPubKey.Address(), + ) + + if propBlockParts.ByteSize() > maxBytes+offset { + s := "real max" + if oversized { + s = "off-by-1" + } + t.Log("Detected "+s+" data size for block;", "size", i, "softMaxDataBytes", softMaxDataBytes) + return propBlock, propBlockParts + } + } + require.Fail(t, "We shouldn't hit the end of the loop") + return nil, nil +} diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index caf789caf4..4d64e8ae08 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -267,3 +267,14 @@ is ignored and the data is retrieved as if `match_events=false`. Additionally, if a node that was running Tendermint Core when the data was first indexed, and switched to CometBFT, is queried, it will retrieve this previously indexed data as if `match_events=false` (attributes can match the query conditions across different events on the same height). + + +# Event attribute value types + +Users can use anything as an event value. However, if the event attribute value is a number, the following restrictions apply: + +- Negative numbers will not be properly retrieved when querying the indexer +- When querying the events using `tx_search` and `block_search`, the value given as part of the condition cannot be a float. +- Any event value retrieved from the database will be represented as a `BigInt` (from `math/big`) +- Floating point values are not read from the database even with the introduction of `BigInt`. This was intentionally done +to keep the same behaviour as was historically present and not introduce breaking changes. This will be fixed in the 0.38 series. diff --git a/docs/core/subscription.md b/docs/core/subscription.md index 3a5d60cd16..796a415ff1 100644 --- a/docs/core/subscription.md +++ b/docs/core/subscription.md @@ -40,6 +40,20 @@ You can also use tags, given you had included them into DeliverTx response, to query transaction results. See [Indexing transactions](./indexing-transactions.md) for details.
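The numeric-query restrictions documented above (and in the subscription docs that follow) can be observed directly with the pubsub query package from this repository; a small sketch mirroring the new tests:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func main() {
	// Operands larger than int64 are now accepted in query conditions.
	q := query.MustParse("account.balance <= 10000000000000000000")
	match, err := q.Matches(map[string][]string{
		"account.balance": {"10000000000000000000"},
	})
	fmt.Println(match, err) // true <nil>

	// Float attribute values are still truncated toward zero before the
	// comparison (pre-existing behaviour), so the .6 does not tip this over.
	q2 := query.MustParse("account.balance > 10000000000000000000")
	match2, _ := q2.Matches(map[string][]string{
		"account.balance": {"10000000000000000000.6"},
	})
	fmt.Println(match2) // false
}
```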
+ +## Query parameter and event type restrictions + +While CometBFT imposes no restrictions on the application with regards to the type of +the event output, there are several restrictions when it comes to querying +events whose attribute values are numeric. + +- Queries cannot include negative numbers +- If floating points are compared to integers, they are converted to an integer +- Floating point to floating point comparison leads to a loss of precision for very big floating point numbers +(e.g., `10000000000000000000.0` is treated the same as `10000000000000000000.6`) +- When floating points do get converted to integers, they are always rounded down. +This has been done to preserve the behaviour present before introducing the support for BigInts in the query parameters. + ## ValidatorSetUpdates When validator set changes, ValidatorSetUpdates event is published. The diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 83829cbe49..1819c542fa 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -11,6 +11,7 @@ package query import ( "fmt" + "math/big" "reflect" "regexp" "strconv" @@ -151,16 +152,17 @@ func (q *Query) Conditions() ([]Condition, error) { conditions = append(conditions, Condition{eventAttr, op, value}) } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, + valueBig := new(big.Int) + _, ok := valueBig.SetString(number, 10) + if !ok { + err := fmt.Errorf( + "problem parsing %s as bigint (should never happen if the grammar is correct)", + number, ) return nil, err } + conditions = append(conditions, Condition{eventAttr, op, valueBig}) - conditions = append(conditions, Condition{eventAttr, op, value}) } case ruletime: @@ -298,11 +300,12 @@ func (q *Query) Matches(events map[string][]string) (bool, error) { return false, nil } } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, + value := new(big.Int) + _, ok := value.SetString(number, 10) + if !ok { + err := fmt.Errorf( + "problem parsing %s as bigInt (should never happen if the grammar is correct)", + number, ) return false, err } @@ -451,42 +454,58 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) return v == operandFloat64, nil } - case reflect.Int64: - var v int64 + case reflect.Pointer: - operandInt := operand.Interface().(int64) - filteredValue := numRegex.FindString(value) + switch operand.Interface().(type) { + case *big.Int: + filteredValue := numRegex.FindString(value) + operandVal := operand.Interface().(*big.Int) + v := new(big.Int) + if strings.ContainsAny(filteredValue, ".") { + // We do this just to check whether the string can be parsed as a float + _, err := strconv.ParseFloat(filteredValue, 64) + if err != nil { + err = fmt.Errorf( + "got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", + err, filteredValue, + ) + return false, err + } - // if value looks like float, we try to parse it as float - if strings.ContainsAny(filteredValue, ".") { - v1, err := strconv.ParseFloat(filteredValue, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to float64: %w", filteredValue, err) - } + // If yes, we get the int part of the string. 
+ // We could simply cast the float to an int and use that to create a big int but + // if it is a number bigger than int64, it will not be parsed properly. + // If we use bigFloat and convert that to a string, the values will be rounded which + // is not what we want either. + // Here we are simulating the behavior that int64(floatValue). This was the default behavior + // before introducing BigInts and we do not want to break the logic in minor releases. + _, ok := v.SetString(strings.Split(filteredValue, ".")[0], 10) + if !ok { + return false, fmt.Errorf("failed to convert value %s from float to big int", filteredValue) + } + } else { + // try our best to convert value from tags to big int + _, ok := v.SetString(filteredValue, 10) + if !ok { + return false, fmt.Errorf("failed to convert value %v from event attribute to big int", filteredValue) + } - v = int64(v1) - } else { - var err error - // try our best to convert value from tags to int64 - v, err = strconv.ParseInt(filteredValue, 10, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to int64: %w", filteredValue, err) } - } + cmpRes := operandVal.Cmp(v) + switch op { + case OpLessEqual: + return cmpRes == 0 || cmpRes == 1, nil + case OpGreaterEqual: + return cmpRes == 0 || cmpRes == -1, nil + case OpLess: + return cmpRes == 1, nil + case OpGreater: + return cmpRes == -1, nil + case OpEqual: + return cmpRes == 0, nil + } - switch op { - case OpLessEqual: - return v <= operandInt, nil - case OpGreaterEqual: - return v >= operandInt, nil - case OpLess: - return v < operandInt, nil - case OpGreater: - return v > operandInt, nil - case OpEqual: - return v == operandInt, nil } - case reflect.String: switch op { case OpEqual: diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go index d511e7fab8..34ea0d0c00 100644 --- a/libs/pubsub/query/query_test.go +++ b/libs/pubsub/query/query_test.go @@ -2,6 +2,7 @@ package query_test import ( "fmt" + "math/big" "testing" "time" @@ -11,6 +12,57 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) +func TestBigNumbers(t *testing.T) { + bigInt := "10000000000000000000" + bigIntAsFloat := "10000000000000000000.0" + bigFloat := "10000000000000000000.6" + bigFloatLowerRounding := "10000000000000000000.1" + doubleBigInt := "20000000000000000000" + + testCases := []struct { + s string + events map[string][]string + err bool + matches bool + matchErr bool + }{ + + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {bigInt}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {bigIntAsFloat}}, false, true, false}, + {"account.balance <= " + doubleBigInt, map[string][]string{"account.balance": {bigInt}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {"10000000000000000001"}}, false, false, false}, + {"account.balance <= " + doubleBigInt, map[string][]string{"account.balance": {bigFloat}}, false, true, false}, + // To maintain compatibility with the old implementation which did a simple cast of float to int64, we do not round the float + // Thus both 10000000000000000000.6 and "10000000000000000000.1 are equal to 10000000000000000000 + // and the test does not find a match + {"account.balance > " + bigInt, map[string][]string{"account.balance": {bigFloat}}, false, false, false}, + {"account.balance > " + bigInt, map[string][]string{"account.balance": {bigFloatLowerRounding}}, true, false, false}, + // This test 
should also find a match, but floats that are too big cannot be properly converted, thus + // 10000000000000000000.6 gets rounded to 10000000000000000000 + {"account.balance > " + bigIntAsFloat, map[string][]string{"account.balance": {bigFloat}}, false, false, false}, + {"account.balance > 11234.0", map[string][]string{"account.balance": {"11234.6"}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {"1000.45"}}, false, true, false}, + } + + for _, tc := range testCases { + q, err := query.New(tc.s) + if !tc.err { + require.Nil(t, err) + } + require.NotNil(t, q, "Query '%s' should not be nil", tc.s) + + if tc.matches { + match, err := q.Matches(tc.events) + assert.Nil(t, err, "Query '%s' should not error on match %v", tc.s, tc.events) + assert.True(t, match, "Query '%s' should match %v", tc.s, tc.events) + } else { + match, err := q.Matches(tc.events) + assert.Equal(t, tc.matchErr, err != nil, "Unexpected error for query '%s' match %v", tc.s, tc.events) + assert.False(t, match, "Query '%s' should not match %v", tc.s, tc.events) + } + } +} + func TestMatches(t *testing.T) { var ( txDate = "2017-01-01" @@ -180,6 +232,10 @@ func TestConditions(t *testing.T) { txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") require.NoError(t, err) + bigInt := new(big.Int) + bigInt, ok := bigInt.SetString("10000000000000000000", 10) + require.True(t, ok) + testCases := []struct { s string conditions []query.Condition @@ -193,8 +249,24 @@ func TestConditions(t *testing.T) { { s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{ - {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, - {CompositeKey: "tx.gas", Op: query.OpLess, Operand: int64(9)}, + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: big.NewInt(7)}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, + }, + }, + { + + s: "tx.gas > 7.5 AND tx.gas < 9", + conditions: []query.Condition{ + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: 7.5}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, + }, + }, + { + + s: "tx.gas > " + bigInt.String() + " AND tx.gas < 9", + conditions: []query.Condition{ + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: bigInt}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, }, }, { diff --git a/mempool/v0/clist_mempool.go b/mempool/v0/clist_mempool.go index 65365fa63d..07e9cb484c 100644 --- a/mempool/v0/clist_mempool.go +++ b/mempool/v0/clist_mempool.go @@ -395,6 +395,20 @@ func (mem *CListMempool) resCbFirstTime( return } + // Check transaction not already in the mempool + if e, ok := mem.txsMap.Load(types.Tx(tx).Key()); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) + memTx.senders.LoadOrStore(peerID, true) + mem.logger.Debug( + "transaction already there, not adding it again", + "tx", types.Tx(tx).Hash(), + "res", r, + "height", mem.height, + "total", mem.Size(), + ) + return + } + memTx := &mempoolTx{ height: mem.height, gasWanted: r.CheckTx.GasWanted, diff --git a/mempool/v0/clist_mempool_test.go b/mempool/v0/clist_mempool_test.go index 824a24a1af..f00413a89f 100644 --- a/mempool/v0/clist_mempool_test.go +++ b/mempool/v0/clist_mempool_test.go @@ -7,6 +7,7 @@ import ( "fmt" mrand "math/rand" "os" + "strconv" "testing" "time" @@ -641,6 +642,51 @@ func TestMempoolTxsBytes(t *testing.T) { } +func TestMempoolNoCacheOverflow(t *testing.T) { + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6)) + app := kvstore.NewApplication() + _, server := 
newRemoteApp(t, sockPath, app) + t.Cleanup(func() { + if err := server.Stop(); err != nil { + t.Error(err) + } + }) + cfg := config.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg) + defer cleanup() + + // add tx0 + var tx0 = types.Tx([]byte{0x01}) + err := mp.CheckTx(tx0, nil, mempool.TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // saturate the cache to remove tx0 + for i := 1; i <= mp.config.CacheSize; i++ { + err = mp.CheckTx(types.Tx([]byte(strconv.Itoa(i))), nil, mempool.TxInfo{}) + require.NoError(t, err) + } + err = mp.FlushAppConn() + require.NoError(t, err) + assert.False(t, mp.cache.Has(types.Tx([]byte{0x01}))) + + // add again tx0 + err = mp.CheckTx(tx0, nil, mempool.TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // tx0 should appear only once in mp.txs + found := 0 + for e := mp.txs.Front(); e != nil; e = e.Next() { + if types.Tx.Key(e.Value.(*mempoolTx).tx) == types.Tx.Key(tx0) { + found++ + } + } + assert.True(t, found == 1) +} + // This will non-deterministically catch some concurrency failures like // https://github.com/tendermint/tendermint/issues/3509 // TODO: all of the tests should probably also run using the remote proxy app diff --git a/node/node.go b/node/node.go index 90d2402c3a..c19efe88c3 100644 --- a/node/node.go +++ b/node/node.go @@ -322,6 +322,7 @@ func createAndStartIndexerService( } func doHandshake( + ctx context.Context, stateStore sm.Store, state sm.State, blockStore sm.BlockStore, @@ -825,7 +826,7 @@ func NewNode(config *cfg.Config, consensusLogger := logger.With("module", "consensus") var softwareVersion string if !stateSync { - softwareVersion, err = doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger) + softwareVersion, err = doHandshake(context.TODO(), stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger) if err != nil { return nil, err } @@ -1134,12 +1135,14 @@ func (n *Node) OnStop() { } if n.blockStore != nil { + n.Logger.Info("Closing blockstore") if err := n.blockStore.Close(); err != nil { n.Logger.Error("problem closing blockstore", "err", err) } } if n.stateStore != nil { + n.Logger.Info("Closing statestore") if err := n.stateStore.Close(); err != nil { n.Logger.Error("problem closing statestore", "err", err) } @@ -1161,6 +1164,12 @@ func (n *Node) OnStop() { } } + if n.evidencePool != nil { + n.Logger.Info("Closing evidencestore") + if err := n.EvidencePool().Close(); err != nil { + n.Logger.Error("problem closing evidencestore", "err", err) + } + } } // ConfigureRPC makes sure RPC has all the objects it needs to operate. diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 9c928f1f04..76fa5063f9 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -139,6 +139,8 @@ var _ HTTPClient = (*Client)(nil) var _ Caller = (*Client)(nil) var _ Caller = (*RequestBatch)(nil) +var _ fmt.Stringer = (*Client)(nil) + // New returns a Client pointed at the given address. // An error is returned on invalid remote. The function panics when remote is nil. 
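The mempool fix above guards against a transaction re-entering `mem.txs` after its cache entry was evicted. A tiny standalone sketch (not CometBFT code) of that guard:

```go
package main

import "fmt"

type mempool struct {
	order []string            // ordered txs (stand-in for the clist)
	byKey map[string]struct{} // lookup by tx key (stand-in for txsMap)
}

func (m *mempool) add(tx string) {
	// Without this check, a tx whose cache entry was evicted could be
	// appended a second time when it is re-submitted via CheckTx.
	if _, ok := m.byKey[tx]; ok {
		fmt.Println("already in mempool, not adding again:", tx)
		return
	}
	m.byKey[tx] = struct{}{}
	m.order = append(m.order, tx)
}

func main() {
	m := &mempool{byKey: map[string]struct{}{}}
	m.add("tx0")
	m.add("tx0") // second CheckTx after cache eviction: must not duplicate
	fmt.Println(len(m.order)) // 1
}
```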
func New(remote string) (*Client, error) { @@ -232,6 +234,10 @@ func getHTTPRespErrPrefix(resp *http.Response) string { return fmt.Sprintf("error in json rpc client, with http response metadata: (Status: %s, Protocol %s)", resp.Status, resp.Proto) } +func (c *Client) String() string { + return fmt.Sprintf("&Client{user=%v, addr=%v, client=%v, nextReqID=%v}", c.username, c.address, c.client, c.nextReqID) +} + // NewRequestBatch starts a batch of requests for this client. func (c *Client) NewRequestBatch() *RequestBatch { return &RequestBatch{ diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 00b88e85f2..f12c6fe6fe 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -25,7 +25,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han fmt.Errorf("error reading request body: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, http.StatusBadRequest, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -48,7 +48,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if err := json.Unmarshal(b, &request); err != nil { res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -122,7 +122,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han wErr = WriteRPCResponseHTTP(w, responses...) } if wErr != nil { - logger.Error("failed to write responses", "res", responses, "err", wErr) + logger.Error("failed to write responses", "err", wErr) } } } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 6dd772e3d9..29eae9fc32 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -188,7 +188,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler // If RPCResponse if res, ok := e.(types.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } else { // Panics can contain anything, attempt to normalize it as an error. 
@@ -207,7 +207,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler res := types.RPCInternalError(types.JSONRPCIntID(-1), err) if wErr := WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 56f8274c95..ebed6eba43 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -27,7 +27,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit return func(w http.ResponseWriter, r *http.Request) { res := types.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, http.StatusNotFound, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } @@ -45,7 +45,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit fmt.Errorf("error converting http params to arguments: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -58,7 +58,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit if err != nil { if err := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, types.RPCInternalError(dummyID, err)); err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } return @@ -71,7 +71,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit err = WriteRPCResponseHTTP(w, resp) } if err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 343b1f918c..068d006287 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -825,7 +825,7 @@ paths: required: true schema: type: string - example: "tx.height=1000" + example: '"tx.height=1000"' - in: query name: prove description: Include proofs of the transactions inclusion in the block @@ -896,7 +896,7 @@ paths: required: true schema: type: string - example: "block.height > 1000 AND valset.changed > 0" + example: '"block.height > 1000 AND valset.changed > 0"' - in: query name: page description: "Page number (1-based)" @@ -1108,7 +1108,7 @@ paths: required: true schema: type: string - example: "/a/b/c" + example: '"/a/b/c"' - in: query name: data description: Data diff --git a/state/execution.go b/state/execution.go index 237e8c608a..fc5a929af9 100644 --- a/state/execution.go +++ b/state/execution.go @@ -248,7 +248,7 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, 0, err } if len(validatorUpdates) > 0 { - blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.logger.Info("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) } // Update the state with the block and responses. 
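The credential-related RPC changes above (the `fmt.Stringer` implementation on the JSON-RPC `Client` and the removal of `res` from failure logs) follow one idea: control what gets printed when a value is logged. A generic standalone sketch (not the actual CometBFT type):

```go
package main

import "fmt"

type client struct {
	addr     string
	username string
	password string // deliberately omitted from String()
}

// String controls what %v and %s print; without it, %v on the pointer would
// dump every field, password included.
func (c *client) String() string {
	return fmt.Sprintf("&client{user=%s, addr=%s}", c.username, c.addr)
}

func main() {
	c := &client{addr: "localhost:26657", username: "alice", password: "hunter2"}
	fmt.Printf("%v\n", c) // &client{user=alice, addr=localhost:26657}
}
```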
diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index 30a71d05d8..a20e3c7afb 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "math/big" "sort" "strconv" "strings" @@ -311,9 +312,10 @@ LOOP: continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(eventValue, 10, 64) - if err != nil { + if _, ok := qr.AnyBound().(*big.Int); ok { + v := new(big.Int) + v, ok := v.SetString(eventValue, 10) + if !ok { // If the number was not int it might be a float but this behavior is kept the same as before the patch continue LOOP } @@ -385,15 +387,16 @@ func (idx *BlockerIndexer) setTmpHeights(tmpHeights map[string][]byte, it dbm.It } } -func checkBounds(ranges indexer.QueryRange, v int64) bool { +func checkBounds(ranges indexer.QueryRange, v *big.Int) bool { include := true lowerBound := ranges.LowerBoundValue() upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { + + if lowerBound != nil && v.Cmp(lowerBound.(*big.Int)) == -1 { include = false } - if upperBound != nil && v > upperBound.(int64) { + if upperBound != nil && v.Cmp(upperBound.(*big.Int)) == 1 { include = false } diff --git a/state/indexer/block/kv/kv_test.go b/state/indexer/block/kv/kv_test.go index 506088398a..e3caca6df2 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/state/indexer/block/kv/kv_test.go @@ -358,3 +358,132 @@ func TestBlockIndexerMulti(t *testing.T) { }) } } + +func TestBigInt(t *testing.T) { + + bigInt := "10000000000000000000" + store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + indexer := blockidxkv.New(store) + + require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ + Header: types.Header{Height: 1}, + ResultBeginBlock: abci.ResponseBeginBlock{ + Events: []abci.Event{}, + }, + ResultEndBlock: abci.ResponseEndBlock{ + Events: []abci.Event{ + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: []byte("foo"), + Value: []byte("100"), + Index: true, + }, + { + Key: []byte("bar"), + Value: []byte("10000000000000000000.76"), + Index: true, + }, + { + Key: []byte("bar_lower"), + Value: []byte("10000000000000000000.1"), + Index: true, + }, + }, + }, + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: []byte("foo"), + Value: []byte(bigInt), + Index: true, + }, + { + Key: []byte("bar"), + Value: []byte("500"), + Index: true, + }, + { + Key: []byte("bla"), + Value: []byte("500.5"), + Index: true, + }, + }, + }, + }, + }, + })) + + testCases := map[string]struct { + q *query.Query + results []int64 + }{ + + "query return all events from a height - exact": { + q: query.MustParse("block.height = 1"), + results: []int64{1}, + }, + "query return all events from a height - exact (deduplicate height)": { + q: query.MustParse("block.height = 1 AND block.height = 2"), + results: []int64{1}, + }, + "query return all events from a height - range": { + q: query.MustParse("block.height < 2 AND block.height > 0 AND block.height > 0"), + results: []int64{1}, + }, + "query matches fields with big int and height - no match": { + q: query.MustParse("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int with less and height - no match": { + q: query.MustParse("end_event.foo <= " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int and height - match": { + q: 
query.MustParse("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 1"), + results: []int64{1}, + }, + "query matches big int in range": { + q: query.MustParse("end_event.foo = " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float - does not pass as float is not converted to int": { + q: query.MustParse("end_event.bar >= " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float - fails because float is converted to int": { + q: query.MustParse("end_event.bar > " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float lower dec point - fails because float is converted to int": { + q: query.MustParse("end_event.bar_lower > " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float with less - found": { + q: query.MustParse("end_event.foo <= " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float with less with height range - found": { + q: query.MustParse("end_event.foo <= " + bigInt + " AND block.height > 0"), + results: []int64{1}, + }, + "query matches big int in range with float with less - not found": { + q: query.MustParse("end_event.foo < " + bigInt + " AND end_event.foo > 100"), + results: []int64{}, + }, + "query does not parse float": { + q: query.MustParse("end_event.bla >= 500"), + results: []int64{}, + }, + } + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + results, err := indexer.Search(context.Background(), tc.q) + require.NoError(t, err) + require.Equal(t, tc.results, results) + }) + } +} diff --git a/state/indexer/block/kv/util.go b/state/indexer/block/kv/util.go index 8c71afb53d..6b44c8cf67 100644 --- a/state/indexer/block/kv/util.go +++ b/state/indexer/block/kv/util.go @@ -3,6 +3,7 @@ package kv import ( "encoding/binary" "fmt" + "math/big" "strconv" "github.com/google/orderedcode" @@ -135,7 +136,7 @@ func parseEventSeqFromEventKey(key []byte) (int64, error) { func lookForHeight(conditions []query.Condition) (int64, bool, int) { for i, c := range conditions { if c.CompositeKey == types.BlockHeightKey && c.Op == query.OpEqual { - return c.Operand.(int64), true, i + return c.Operand.(*big.Int).Int64(), true, i } } @@ -159,7 +160,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio continue } else { heightCondition = append(heightCondition, c) - heightInfo.height = c.Operand.(int64) + heightInfo.height = c.Operand.(*big.Int).Int64() // As height is assumed to always be int64 found = true } } else { @@ -196,7 +197,7 @@ func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) { for i, c := range conditions { if c.CompositeKey == types.MatchEventKey { // Match events should be added only via RPC as the very first query condition - if i == 0 && c.Op == query.OpEqual && c.Operand.(int64) == 1 { + if i == 0 && c.Op == query.OpEqual && c.Operand.(*big.Int).Int64() == 1 { dedupConditions = append(dedupConditions, c) matchEvents = true } @@ -210,7 +211,7 @@ func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) { func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) bool { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { + if !checkBounds(heightInfo.heightRange, big.NewInt(keyHeight)) { return false } } else { diff --git a/state/indexer/query_range.go b/state/indexer/query_range.go index 27fab657ef..20ac70cd8d 100644 --- a/state/indexer/query_range.go +++ 
b/state/indexer/query_range.go @@ -1,6 +1,7 @@ package indexer import ( + "math/big" "time" "github.com/tendermint/tendermint/libs/pubsub/query" @@ -44,6 +45,9 @@ func (qr QueryRange) LowerBoundValue() interface{} { switch t := qr.LowerBound.(type) { case int64: return t + 1 + case *big.Int: + tmp := new(big.Int) + return tmp.Add(t, big.NewInt(1)) case time.Time: return t.Unix() + 1 @@ -67,7 +71,9 @@ func (qr QueryRange) UpperBoundValue() interface{} { switch t := qr.UpperBound.(type) { case int64: return t - 1 - + case *big.Int: + tmp := new(big.Int) + return tmp.Sub(t, big.NewInt(1)) case time.Time: return t.Unix() - 1 diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index b6979e2d52..8199dd054a 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -5,6 +5,7 @@ import ( "context" "encoding/hex" "fmt" + "math/big" "strconv" "strings" @@ -371,7 +372,7 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) func lookForHeight(conditions []query.Condition) (height int64, heightIdx int) { for i, c := range conditions { if c.CompositeKey == types.TxHeightKey && c.Op == query.OpEqual { - return c.Operand.(int64), i + return c.Operand.(*big.Int).Int64(), i } } return 0, -1 @@ -573,9 +574,11 @@ LOOP: continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - if err != nil { + if _, ok := qr.AnyBound().(*big.Int); ok { + v := new(big.Int) + eventValue := extractValueFromKey(it.Key()) + v, ok := v.SetString(eventValue, 10) + if !ok { continue LOOP } @@ -715,15 +718,15 @@ func startKey(fields ...interface{}) []byte { return b.Bytes() } -func checkBounds(ranges indexer.QueryRange, v int64) bool { +func checkBounds(ranges indexer.QueryRange, v *big.Int) bool { include := true lowerBound := ranges.LowerBoundValue() upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { + if lowerBound != nil && v.Cmp(lowerBound.(*big.Int)) == -1 { include = false } - if upperBound != nil && v > upperBound.(int64) { + if upperBound != nil && v.Cmp(upperBound.(*big.Int)) == 1 { include = false } diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 40cd6a1b5b..e2e4eeecdb 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -19,6 +19,77 @@ import ( "github.com/tendermint/tendermint/types" ) +func TestBigInt(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + + bigInt := "10000000000000000000" + bigIntPlus1 := "10000000000000000001" + bigFloat := bigInt + ".76" + bigFloatLower := bigInt + ".1" + + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigInt), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigIntPlus1), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigFloatLower), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("owner"), Value: []byte("/Ivan/"), Index: true}}}, + {Type: "", Attributes: []abci.EventAttribute{{Key: []byte("not_allowed"), Value: []byte("Vlad"), Index: true}}}, + }) + hash := types.Tx(txResult.Tx).Hash() + + err := indexer.Index(txResult) + + require.NoError(t, err) + + txResult2 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigFloat), Index: true}}}, + {Type: "account", 
Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigFloat), Index: true}, {Key: []byte("amount"), Value: []byte("5"), Index: true}}}, + }) + + txResult2.Tx = types.Tx("NEW TX") + txResult2.Height = 2 + txResult2.Index = 2 + + hash2 := types.Tx(txResult2.Tx).Hash() + + err = indexer.Index(txResult2) + require.NoError(t, err) + testCases := []struct { + q string + txRes *abci.TxResult + resultsLength int + }{ + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash), txResult, 1}, + // search by hash (lower) + {fmt.Sprintf("tx.hash = '%x'", hash), txResult, 1}, + {fmt.Sprintf("tx.hash = '%x'", hash2), txResult2, 1}, + // search by exact match (one key) - bigint + {"match.events = 1 AND account.number >= " + bigInt, nil, 1}, + // search by exact match (one key) - bigint range + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0", nil, 1}, + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0 AND account.owner = '/Ivan/'", nil, 0}, + // Floats are not parsed + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0 AND account.amount > 4", txResult2, 0}, + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0 AND account.amount = 5", txResult2, 0}, + {"match.events = 1 AND account.number >= " + bigInt + " AND account.amount <= 5", txResult2, 0}, + {"match.events = 1 AND account.number < " + bigInt + " AND tx.height = 1", nil, 0}, + } + + ctx := context.Background() + + for _, tc := range testCases { + tc := tc + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(ctx, query.MustParse(tc.q)) + assert.NoError(t, err) + assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 && tc.txRes != nil { + assert.True(t, proto.Equal(results[0], tc.txRes)) + } + }) + } +} + func TestTxIndex(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) diff --git a/state/txindex/kv/utils.go b/state/txindex/kv/utils.go index a7ca5c826e..3dfa49b76a 100644 --- a/state/txindex/kv/utils.go +++ b/state/txindex/kv/utils.go @@ -2,6 +2,7 @@ package kv import ( "fmt" + "math/big" "github.com/google/orderedcode" "github.com/tendermint/tendermint/libs/pubsub/query" @@ -33,7 +34,7 @@ func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) { for i, c := range conditions { if c.CompositeKey == types.MatchEventKey { // Match events should be added only via RPC as the very first query condition - if i == 0 && c.Op == query.OpEqual && c.Operand.(int64) == 1 { + if i == 0 && c.Op == query.OpEqual && c.Operand.(*big.Int).Int64() == 1 { dedupConditions = append(dedupConditions, c) matchEvents = true } @@ -79,7 +80,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio } else { found = true heightCondition = append(heightCondition, c) - heightInfo.height = c.Operand.(int64) + heightInfo.height = c.Operand.(*big.Int).Int64() //Height is always int64 } } else { heightInfo.onlyHeightEq = false @@ -110,7 +111,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) bool { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { + if !checkBounds(heightInfo.heightRange, big.NewInt(keyHeight)) { return false } } else { diff --git a/version/version.go b/version/version.go index 36c9901711..ea80cbcf36 100644 --- a/version/version.go +++ b/version/version.go @@ -3,7 +3,7 @@ package version const ( // TMCoreSemVer is the used as the 
fallback version of CometBFT Core // when not using git describe. It is formatted with semantic versioning. - TMCoreSemVer = "0.34.28" + TMCoreSemVer = "0.34.29" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.17.0"
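The recurring pattern in the kv indexer and tx indexer changes above is the switch from strconv.ParseInt/int64 comparisons to math/big: attribute values are parsed with big.Int.SetString and range checks go through big.Int.Cmp, so integers larger than int64 now match numeric conditions instead of being skipped, while float-valued attributes still fail the integer parse. A standalone sketch of that behaviour using the same example value as the new tests; it illustrates the standard-library calls involved rather than reproducing the patched functions:

	package main

	import (
		"fmt"
		"math/big"
		"strconv"
	)

	func main() {
		eventValue := "10000000000000000000" // one digit beyond the int64 range

		// Pre-patch style: the parse fails for values above int64, and the entry
		// would simply be skipped by the indexer.
		if _, err := strconv.ParseInt(eventValue, 10, 64); err != nil {
			fmt.Println("int64 parse fails:", err)
		}

		// Patched style: parse into a big.Int; non-integer values such as "500.5"
		// still fail here and are skipped.
		v, ok := new(big.Int).SetString(eventValue, 10)
		if !ok {
			panic("value is not an integer")
		}

		// Range check in the spirit of the updated checkBounds: Cmp returns -1, 0 or 1.
		lower := big.NewInt(100)
		fmt.Println("within lower bound:", v.Cmp(lower) >= 0)
	}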