diff --git a/.changelog/unreleased/improvements/1210-close-evidence-db.md b/.changelog/unreleased/improvements/1210-close-evidence-db.md new file mode 100644 index 0000000000..e32bc87dbe --- /dev/null +++ b/.changelog/unreleased/improvements/1210-close-evidence-db.md @@ -0,0 +1 @@ +- `[node]` Close evidence.db OnStop ([cometbft/cometbft\#1210](https://github.com/cometbft/cometbft/pull/1210): @chillyvee) diff --git a/.changelog/unreleased/improvements/857-make-handshake-cancelable.md b/.changelog/unreleased/improvements/857-make-handshake-cancelable.md new file mode 100644 index 0000000000..16b447f6d2 --- /dev/null +++ b/.changelog/unreleased/improvements/857-make-handshake-cancelable.md @@ -0,0 +1 @@ +- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) diff --git a/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md b/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md new file mode 100644 index 0000000000..4a0000db6d --- /dev/null +++ b/.changelog/v0.34.29/bug-fixes/771-kvindexer-parsing-big-ints.md @@ -0,0 +1,2 @@ +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now + enabled. ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md b/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md new file mode 100644 index 0000000000..fc5f25a90f --- /dev/null +++ b/.changelog/v0.34.29/bug-fixes/771-pubsub-parsing-big-ints.md @@ -0,0 +1,4 @@ +- `[pubsub]` Pubsub queries are now able to parse big integers (larger than + int64). Very big floats are also properly parsed into very big integers + instead of being truncated to int64. 
+ ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md b/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md new file mode 100644 index 0000000000..3fddfee8e7 --- /dev/null +++ b/.changelog/v0.34.29/improvements/654-rpc-rm-response-data-logs.md @@ -0,0 +1,3 @@ +- `[rpc]` Remove response data from response failure logs in order + to prevent large quantities of log data from being produced + ([\#654](https://github.com/cometbft/cometbft/issues/654)) \ No newline at end of file diff --git a/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md b/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md new file mode 100644 index 0000000000..430b7b5ac4 --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/788-rpc-client-pw.md @@ -0,0 +1,3 @@ +- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC + client credentials from being inadvertently dumped to logs + ([\#788](https://github.com/cometbft/cometbft/pull/788)) diff --git a/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md b/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md new file mode 100644 index 0000000000..782eccd9d5 --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/794-cli-debug-kill-unsafe-cast.md @@ -0,0 +1,2 @@ +- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in + `debug kill` command ([\#794](https://github.com/cometbft/cometbft/pull/794)) diff --git a/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md b/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md new file mode 100644 index 0000000000..fdd9172c20 --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/865-fix-peerstate-marshaljson.md @@ -0,0 +1,3 @@ +- `[consensus]` **Low severity** - Avoid recursive call after rename to + `(*PeerState).MarshalJSON` + ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git 
a/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md b/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md new file mode 100644 index 0000000000..bad30efc7a --- /dev/null +++ b/.changelog/v0.34.29/security-fixes/890-mempool-fix-cache.md @@ -0,0 +1,3 @@ +- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from + appearing twice in the mempool + ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) diff --git a/.changelog/v0.34.29/summary.md b/.changelog/v0.34.29/summary.md new file mode 100644 index 0000000000..7ecb273940 --- /dev/null +++ b/.changelog/v0.34.29/summary.md @@ -0,0 +1,4 @@ +*June 14, 2023* + +Provides several minor bug fixes, as well as fixes for several low-severity +security issues. diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index ba206e4964..8b6705b1fd 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -45,7 +45,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 1 # we need a .git directory to run git diff diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index f4cf3f6f10..e70c65469d 100644 --- a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -14,7 +14,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -40,10 +40,10 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v3.0.0 - name: Build but do not Publish to Docker Hub - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v5 with: context: . 
file: ./DOCKER/Dockerfile diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 8e009b138a..60fe9a6b84 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,7 +9,7 @@ jobs: split-test-files: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Create a file with all the pkgs run: go list ./... > pkgs.txt - name: Split pkgs into 4 files @@ -46,7 +46,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -68,7 +68,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -92,7 +92,7 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index bca2861687..5b93752694 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -18,7 +18,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 4845e97c94..dfa9b88526 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -25,7 +25,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: 'v0.34.x-celestia' @@ -49,7 +49,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8 env: SLACK_WEBHOOK: ${{ 
secrets.SLACK_WEBHOOK }} SLACK_CHANNEL: cometbft-engineering @@ -65,7 +65,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on success - uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} SLACK_CHANNEL: cometbft-engineering diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 2cb134aa90..f265593c8b 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -15,7 +15,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index c2fcda24c7..7e78fd0182 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -13,7 +13,7 @@ jobs: with: go-version: '1.19' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install go-fuzz working-directory: test/fuzz @@ -72,7 +72,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack if any crashers - uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} SLACK_CHANNEL: cometbft-engineering diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 33c46137d2..2a69042d98 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: '1.19' diff --git a/.github/workflows/markdown-linter.yml b/.github/workflows/markdown-linter.yml index bdbd7f2c33..6eeda3bc05 100644 --- a/.github/workflows/markdown-linter.yml +++ 
b/.github/workflows/markdown-linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Lint Code Base uses: docker://github/super-linter:v4 env: diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 2a9872bb9d..a7db3aae6c 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -44,7 +44,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for changes available in this pre-release, but not yet officially released." > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: version: latest args: release --clean --release-notes ../release_notes.md @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon pre-release - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index 7579d8efb5..8ca233b00e 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -14,8 +14,8 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 5 steps: - - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.17.0 + - uses: actions/checkout@v4 + - uses: bufbuild/buf-setup-action@v1.26.1 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release-version.yml b/.github/workflows/release-version.yml index 034d191f21..c7c977e4a3 100644 --- a/.github/workflows/release-version.yml +++ b/.github/workflows/release-version.yml @@ -11,7 +11,7 @@ jobs: check-version: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - 
uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f3a9dd4e93..da1fe53d40 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -26,7 +26,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for this release." > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: version: latest args: release --clean --release-notes ../release_notes.md diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml index 4a7efb141e..e9f84e525a 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -16,7 +16,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -42,17 +42,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v3.0.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v3.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v5.0.0 with: context: . 
file: ./test/e2e/docker/Dockerfile diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 64bb85e744..38e5caa668 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -58,7 +58,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -90,7 +90,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version: "1.19" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/CHANGELOG.md b/CHANGELOG.md index b89bd25153..7ecaaaa713 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,41 @@ # CHANGELOG +## v0.34.29 + +*June 14, 2023* + +Provides several minor bug fixes, as well as fixes for several low-severity +security issues. + +### BUG FIXES + +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now + enabled. ([\#771](https://github.com/cometbft/cometbft/pull/771)) +- `[pubsub]` Pubsub queries are now able to parse big integers (larger than + int64). Very big floats are also properly parsed into very big integers + instead of being truncated to int64. 
+ ([\#771](https://github.com/cometbft/cometbft/pull/771)) + +### IMPROVEMENTS + +- `[rpc]` Remove response data from response failure logs in order + to prevent large quantities of log data from being produced + ([\#654](https://github.com/cometbft/cometbft/issues/654)) + +### SECURITY FIXES + +- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC + client credentials from being inadvertently dumped to logs + ([\#788](https://github.com/cometbft/cometbft/pull/788)) +- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in + `debug kill` command ([\#794](https://github.com/cometbft/cometbft/pull/794)) +- `[consensus]` **Low severity** - Avoid recursive call after rename to + `(*PeerState).MarshalJSON` + ([\#863](https://github.com/cometbft/cometbft/pull/863)) +- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from + appearing twice in the mempool + ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) + ## v0.34.28 *April 26, 2023* diff --git a/DOCKER/docker-entrypoint.sh b/DOCKER/docker-entrypoint.sh index 12a1aeaf84..8d00c79994 100755 --- a/DOCKER/docker-entrypoint.sh +++ b/DOCKER/docker-entrypoint.sh @@ -9,7 +9,7 @@ if [ ! 
-d "$CMTHOME/config" ]; then -e "s/^proxy_app\s*=.*/proxy_app = \"$PROXY_APP\"/" \ -e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \ -e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \ - -e 's/^target_height_duration\s*=.*/target_height_duration = "1000ms"/' \ + -e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \ -e 's/^index_all_tags\s*=.*/index_all_tags = true/' \ -e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \ -e 's/^prometheus\s*=.*/prometheus = true/' \ diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 78d680accb..c2f5adaea8 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -9,6 +9,7 @@ import ( "context" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "github.com/tendermint/tendermint/abci/types" cmtnet "github.com/tendermint/tendermint/libs/net" @@ -88,8 +89,7 @@ func (cli *grpcClient) OnStart() error { RETRY_LOOP: for { - //nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option. - conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(cli.addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc)) if err != nil { if cli.mustConnect { return err diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 0bd172eb25..c53f5d9230 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "golang.org/x/net/context" @@ -148,8 +149,7 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { }) // Connect to the socket - //nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option. 
- conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(socket, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc)) if err != nil { t.Fatalf("Error dialing GRPC server: %v", err.Error()) } diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 51c0dec764..1aac7257a8 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1276,6 +1276,14 @@ type RequestPrepareProposal struct { BlockData *types1.Data `protobuf:"bytes,1,opt,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` // If an application decides to populate block_data with extra information, they can not exceed this value. BlockDataSize int64 `protobuf:"varint,2,opt,name=block_data_size,json=blockDataSize,proto3" json:"block_data_size,omitempty"` + // chain_id is a unique identifier for the blockchain network this proposal + // belongs to (e.g. mocha-1). + ChainId string `protobuf:"bytes,3,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // height is the height of the proposal block + Height int64 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` + // time is the time that will end up in the header. This is the voting power + // weighted median of the last commit. 
+ Time time.Time `protobuf:"bytes,5,opt,name=time,proto3,stdtime" json:"time"` } func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } @@ -1325,6 +1333,27 @@ func (m *RequestPrepareProposal) GetBlockDataSize() int64 { return 0 } +func (m *RequestPrepareProposal) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +func (m *RequestPrepareProposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestPrepareProposal) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + type RequestProcessProposal struct { Header types1.Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` BlockData *types1.Data `protobuf:"bytes,2,opt,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` @@ -1379,6 +1408,7 @@ func (m *RequestProcessProposal) GetBlockData() *types1.Data { type Response struct { // Types that are valid to be assigned to Value: + // // *Response_Exception // *Response_Echo // *Response_Flush @@ -3514,195 +3544,196 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 2995 bytes of a gzipped FileDescriptorProto + // 3014 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0xe3, 0xc6, - 0x11, 0xe6, 0x9b, 0x44, 0x53, 0x7c, 0x68, 0x76, 0xbd, 0x4b, 0xc3, 0x6b, 0x69, 0x8d, 0x2d, 0xbf, - 0xd6, 0xb6, 0x14, 0xcb, 0xb5, 0x8e, 0x1d, 0x3b, 0xb1, 0x45, 0x2e, 0xd7, 0x94, 0x57, 0x96, 0x14, - 0x88, 0x5a, 0xe7, 0xe5, 0x85, 0x41, 0x72, 0x44, 0xc2, 0x4b, 0x02, 0x30, 0x30, 0xd4, 0x4a, 0x7b, - 0x4c, 0x25, 0x95, 0x2a, 0xe7, 0xe2, 0xaa, 0x5c, 0x72, 0xf1, 0x4f, 0xc8, 0x3d, 0x97, 0xe4, 0x92, - 0x8b, 0xab, 0x72, 0x88, 0x8f, 0x39, 0xa4, 0x9c, 0x94, 0x7d, 0xcb, 0x1f, 0xc8, 0x29, 0x95, 0xd4, - 0x3c, 0xf0, 0x22, 0x09, 0x11, 0xb2, 0x73, 0xcb, 0x0d, 0xd3, 0xe8, 0xfe, 0x80, 0x69, 
0xcc, 0x74, - 0x7f, 0xdd, 0x18, 0x78, 0x82, 0x60, 0x73, 0x80, 0x9d, 0x89, 0x61, 0x92, 0x4d, 0xbd, 0xd7, 0x37, - 0x36, 0xc9, 0x99, 0x8d, 0xdd, 0x0d, 0xdb, 0xb1, 0x88, 0x85, 0x6a, 0xc1, 0xcd, 0x0d, 0x7a, 0x53, - 0x7e, 0x32, 0xa4, 0xdd, 0x77, 0xce, 0x6c, 0x62, 0x6d, 0xda, 0x8e, 0x65, 0x1d, 0x73, 0x7d, 0xf9, - 0x5a, 0xe8, 0x36, 0xc3, 0x09, 0xa3, 0x45, 0xee, 0x0a, 0xe3, 0x07, 0xf8, 0xcc, 0xbb, 0xfb, 0xe4, - 0x9c, 0xad, 0xad, 0x3b, 0xfa, 0xc4, 0xbb, 0xbd, 0x3e, 0xb4, 0xac, 0xe1, 0x18, 0x6f, 0xb2, 0x51, - 0x6f, 0x7a, 0xbc, 0x49, 0x8c, 0x09, 0x76, 0x89, 0x3e, 0xb1, 0x85, 0xc2, 0xe5, 0xa1, 0x35, 0xb4, - 0xd8, 0xe5, 0x26, 0xbd, 0xe2, 0x52, 0xe5, 0x0f, 0x12, 0x14, 0x55, 0xfc, 0xf1, 0x14, 0xbb, 0x04, - 0x6d, 0x41, 0x0e, 0xf7, 0x47, 0x56, 0x23, 0x7d, 0x3d, 0xfd, 0x5c, 0x79, 0xeb, 0xda, 0xc6, 0xcc, - 0xe4, 0x36, 0x84, 0x5e, 0xbb, 0x3f, 0xb2, 0x3a, 0x29, 0x95, 0xe9, 0xa2, 0x5b, 0x90, 0x3f, 0x1e, - 0x4f, 0xdd, 0x51, 0x23, 0xc3, 0x8c, 0x9e, 0x8c, 0x33, 0xba, 0x43, 0x95, 0x3a, 0x29, 0x95, 0x6b, - 0xd3, 0x47, 0x19, 0xe6, 0xb1, 0xd5, 0xc8, 0x9e, 0xff, 0xa8, 0x1d, 0xf3, 0x98, 0x3d, 0x8a, 0xea, - 0xa2, 0x26, 0x80, 0x8b, 0x89, 0x66, 0xd9, 0xc4, 0xb0, 0xcc, 0x46, 0x8e, 0x59, 0x3e, 0x15, 0x67, - 0x79, 0x88, 0xc9, 0x3e, 0x53, 0xec, 0xa4, 0x54, 0xc9, 0xf5, 0x06, 0x14, 0xc3, 0x30, 0x0d, 0xa2, - 0xf5, 0x47, 0xba, 0x61, 0x36, 0xf2, 0xe7, 0x63, 0xec, 0x98, 0x06, 0x69, 0x51, 0x45, 0x8a, 0x61, - 0x78, 0x03, 0x3a, 0xe5, 0x8f, 0xa7, 0xd8, 0x39, 0x6b, 0x14, 0xce, 0x9f, 0xf2, 0x0f, 0xa9, 0x12, - 0x9d, 0x32, 0xd3, 0x46, 0x6d, 0x28, 0xf7, 0xf0, 0xd0, 0x30, 0xb5, 0xde, 0xd8, 0xea, 0x3f, 0x68, - 0x14, 0x99, 0xb1, 0x12, 0x67, 0xdc, 0xa4, 0xaa, 0x4d, 0xaa, 0xd9, 0x49, 0xa9, 0xd0, 0xf3, 0x47, - 0xe8, 0x4d, 0x28, 0xf5, 0x47, 0xb8, 0xff, 0x40, 0x23, 0xa7, 0x8d, 0x12, 0xc3, 0x58, 0x8f, 0xc3, - 0x68, 0x51, 0xbd, 0xee, 0x69, 0x27, 0xa5, 0x16, 0xfb, 0xfc, 0x92, 0xce, 0x7f, 0x80, 0xc7, 0xc6, - 0x09, 0x76, 0xa8, 0xbd, 0x74, 0xfe, 0xfc, 0x6f, 0x73, 0x4d, 0x86, 0x20, 0x0d, 0xbc, 0x01, 0x7a, - 0x0b, 0x24, 0x6c, 0x0e, 
0xc4, 0x34, 0x80, 0x41, 0x5c, 0x8f, 0x5d, 0x2b, 0xe6, 0xc0, 0x9b, 0x44, - 0x09, 0x8b, 0x6b, 0xf4, 0x1a, 0x14, 0xfa, 0xd6, 0x64, 0x62, 0x90, 0x46, 0x99, 0x59, 0xaf, 0xc5, - 0x4e, 0x80, 0x69, 0x75, 0x52, 0xaa, 0xd0, 0x47, 0x7b, 0x50, 0x1d, 0x1b, 0x2e, 0xd1, 0x5c, 0x53, - 0xb7, 0xdd, 0x91, 0x45, 0xdc, 0xc6, 0x0a, 0x43, 0x78, 0x3a, 0x0e, 0x61, 0xd7, 0x70, 0xc9, 0xa1, - 0xa7, 0xdc, 0x49, 0xa9, 0x95, 0x71, 0x58, 0x40, 0xf1, 0xac, 0xe3, 0x63, 0xec, 0xf8, 0x80, 0x8d, - 0xca, 0xf9, 0x78, 0xfb, 0x54, 0xdb, 0xb3, 0xa7, 0x78, 0x56, 0x58, 0x80, 0x7e, 0x0a, 0x97, 0xc6, - 0x96, 0x3e, 0xf0, 0xe1, 0xb4, 0xfe, 0x68, 0x6a, 0x3e, 0x68, 0x54, 0x19, 0xe8, 0xf3, 0xb1, 0x2f, - 0x69, 0xe9, 0x03, 0x0f, 0xa2, 0x45, 0x0d, 0x3a, 0x29, 0x75, 0x75, 0x3c, 0x2b, 0x44, 0xf7, 0xe1, - 0xb2, 0x6e, 0xdb, 0xe3, 0xb3, 0x59, 0xf4, 0x1a, 0x43, 0xbf, 0x19, 0x87, 0xbe, 0x4d, 0x6d, 0x66, - 0xe1, 0x91, 0x3e, 0x27, 0x45, 0x5d, 0xa8, 0xdb, 0x0e, 0xb6, 0x75, 0x07, 0x6b, 0xb6, 0x63, 0xd9, - 0x96, 0xab, 0x8f, 0x1b, 0x75, 0x86, 0xfd, 0x6c, 0x1c, 0xf6, 0x01, 0xd7, 0x3f, 0x10, 0xea, 0x9d, - 0x94, 0x5a, 0xb3, 0xa3, 0x22, 0x8e, 0x6a, 0xf5, 0xb1, 0xeb, 0x06, 0xa8, 0xab, 0xcb, 0x50, 0x99, - 0x7e, 0x14, 0x35, 0x22, 0x6a, 0x16, 0x21, 0x7f, 0xa2, 0x8f, 0xa7, 0x58, 0x79, 0x16, 0xca, 0xa1, - 0xb0, 0x84, 0x1a, 0x50, 0x9c, 0x60, 0xd7, 0xd5, 0x87, 0x98, 0x45, 0x31, 0x49, 0xf5, 0x86, 0x4a, - 0x15, 0x56, 0xc2, 0xa1, 0x48, 0x99, 0xf8, 0x86, 0x34, 0xc8, 0x50, 0xc3, 0x13, 0xec, 0xb8, 0x34, - 0xb2, 0x08, 0x43, 0x31, 0x44, 0x37, 0xa0, 0xc2, 0x96, 0xba, 0xe6, 0xdd, 0xa7, 0x91, 0x2e, 0xa7, - 0xae, 0x30, 0xe1, 0x3d, 0xa1, 0xb4, 0x0e, 0x65, 0x7b, 0xcb, 0xf6, 0x55, 0xb2, 0x4c, 0x05, 0xec, - 0x2d, 0x5b, 0x28, 0x28, 0xdf, 0x83, 0xfa, 0x6c, 0x64, 0x42, 0x75, 0xc8, 0x3e, 0xc0, 0x67, 0xe2, - 0x79, 0xf4, 0x12, 0x5d, 0x16, 0xd3, 0x62, 0xcf, 0x90, 0x54, 0x31, 0xc7, 0x3f, 0x67, 0x7c, 0x63, - 0x3f, 0x24, 0xa1, 0xd7, 0x20, 0x47, 0x23, 0xbc, 0x08, 0xd6, 0xf2, 0x06, 0x0f, 0xff, 0x1b, 0x5e, - 0xf8, 0xdf, 0xe8, 0x7a, 0xe1, 0xbf, 0x59, 0xfa, 0xfc, 0xcb, 
0xf5, 0xd4, 0xa7, 0x7f, 0x5f, 0x4f, - 0xab, 0xcc, 0x02, 0x3d, 0x4e, 0x23, 0x88, 0x6e, 0x98, 0x9a, 0x31, 0x10, 0xcf, 0x29, 0xb2, 0xf1, - 0xce, 0x00, 0xdd, 0x85, 0x7a, 0xdf, 0x32, 0x5d, 0x6c, 0xba, 0x53, 0x57, 0xe3, 0xe9, 0x45, 0x84, - 0xe8, 0xf9, 0x1d, 0xde, 0xf2, 0x14, 0x0f, 0x98, 0x9e, 0x5a, 0xeb, 0x47, 0x05, 0xe8, 0x0e, 0xc0, - 0x89, 0x3e, 0x36, 0x06, 0x3a, 0xb1, 0x1c, 0xb7, 0x91, 0xbb, 0x9e, 0x5d, 0x08, 0x73, 0xcf, 0x53, - 0x39, 0xb2, 0x07, 0x3a, 0xc1, 0xcd, 0x1c, 0x7d, 0x5b, 0x35, 0x64, 0x89, 0x9e, 0x81, 0x9a, 0x6e, - 0xdb, 0x9a, 0x4b, 0x74, 0x82, 0xb5, 0xde, 0x19, 0xc1, 0x2e, 0x0b, 0xdc, 0x2b, 0x6a, 0x45, 0xb7, - 0xed, 0x43, 0x2a, 0x6d, 0x52, 0x21, 0x7a, 0x1a, 0xaa, 0x34, 0x48, 0x1b, 0xfa, 0x58, 0x1b, 0x61, - 0x63, 0x38, 0x22, 0x2c, 0x40, 0x67, 0xd5, 0x8a, 0x90, 0x76, 0x98, 0x50, 0x19, 0xf8, 0x0b, 0x81, - 0x05, 0x68, 0x84, 0x20, 0x37, 0xd0, 0x89, 0xce, 0x1c, 0xb9, 0xa2, 0xb2, 0x6b, 0x2a, 0xb3, 0x75, - 0x32, 0x12, 0xee, 0x61, 0xd7, 0xe8, 0x0a, 0x14, 0x04, 0x6c, 0x96, 0xc1, 0x8a, 0x11, 0xfd, 0x66, - 0xb6, 0x63, 0x9d, 0x60, 0x96, 0x91, 0x4a, 0x2a, 0x1f, 0x28, 0xbf, 0xc8, 0xc0, 0xea, 0x5c, 0x28, - 0xa7, 0xb8, 0x23, 0xdd, 0x1d, 0x79, 0xcf, 0xa2, 0xd7, 0xe8, 0x55, 0x8a, 0xab, 0x0f, 0xb0, 0x23, - 0x52, 0x68, 0x23, 0xec, 0x22, 0x4e, 0x0f, 0x3a, 0xec, 0xbe, 0x70, 0x8d, 0xd0, 0x46, 0xfb, 0x50, - 0x1f, 0xeb, 0x2e, 0xd1, 0x78, 0x68, 0xd4, 0x42, 0xe9, 0x74, 0x3e, 0x21, 0xec, 0xea, 0x5e, 0x30, - 0xa5, 0x8b, 0x5d, 0x00, 0x55, 0xc7, 0x11, 0x29, 0x52, 0xe1, 0x72, 0xef, 0xec, 0x91, 0x6e, 0x12, - 0xc3, 0xc4, 0xda, 0xdc, 0x97, 0x7b, 0x7c, 0x0e, 0xb4, 0x7d, 0x62, 0x0c, 0xb0, 0xd9, 0xf7, 0x3e, - 0xd9, 0x25, 0xdf, 0xd8, 0xff, 0xa4, 0xae, 0xa2, 0x42, 0x35, 0x9a, 0x8c, 0x50, 0x15, 0x32, 0xe4, - 0x54, 0x38, 0x20, 0x43, 0x4e, 0xd1, 0x77, 0x20, 0x47, 0x27, 0xc9, 0x26, 0x5f, 0x5d, 0xc0, 0x04, - 0x84, 0x5d, 0xf7, 0xcc, 0xc6, 0x2a, 0xd3, 0x54, 0x14, 0x7f, 0x37, 0xf8, 0x09, 0x6a, 0x16, 0x55, - 0x79, 0x1e, 0x6a, 0x33, 0x19, 0x28, 0xf4, 0xfd, 0xd2, 0xe1, 0xef, 0xa7, 0xd4, 0xa0, 0x12, 0x49, - 
0x37, 0xca, 0x15, 0xb8, 0xbc, 0x28, 0x7b, 0x28, 0x23, 0x5f, 0x1e, 0xc9, 0x02, 0xe8, 0x16, 0x94, - 0xfc, 0xf4, 0xc1, 0x77, 0xe3, 0xbc, 0xaf, 0x3c, 0x65, 0xd5, 0x57, 0xa5, 0xdb, 0x90, 0x2e, 0x6b, - 0xb6, 0x1e, 0x32, 0xec, 0xc5, 0x8b, 0xba, 0x6d, 0x77, 0x74, 0x77, 0xa4, 0x7c, 0x08, 0x8d, 0xb8, - 0xd4, 0x30, 0x33, 0x8d, 0x9c, 0xbf, 0x0c, 0xaf, 0x40, 0xe1, 0xd8, 0x72, 0x26, 0x3a, 0x61, 0x60, - 0x15, 0x55, 0x8c, 0xe8, 0xf2, 0xe4, 0x69, 0x22, 0xcb, 0xc4, 0x7c, 0xa0, 0x68, 0xf0, 0x78, 0x6c, - 0x7a, 0xa0, 0x26, 0x86, 0x39, 0xc0, 0xdc, 0x9f, 0x15, 0x95, 0x0f, 0x02, 0x20, 0xfe, 0xb2, 0x7c, - 0x40, 0x1f, 0xeb, 0xb2, 0xb9, 0x32, 0x7c, 0x49, 0x15, 0x23, 0xe5, 0x21, 0x5c, 0x59, 0x9c, 0x23, - 0xd0, 0x2d, 0x00, 0x1e, 0x4f, 0xfd, 0x5d, 0x57, 0xde, 0xba, 0x32, 0xbf, 0xe6, 0x6f, 0xeb, 0x44, - 0x57, 0x25, 0xa6, 0x49, 0x2f, 0x69, 0x14, 0x08, 0xcc, 0x34, 0xd7, 0x78, 0xc4, 0x97, 0x4c, 0x56, - 0xad, 0xf8, 0x3a, 0x87, 0xc6, 0x23, 0xac, 0xfc, 0x2a, 0x1d, 0x7a, 0x72, 0x24, 0x69, 0x84, 0x76, - 0x5a, 0xfa, 0x42, 0x3b, 0x2d, 0xfa, 0xc6, 0x99, 0x84, 0x6f, 0xac, 0xfc, 0x06, 0xa0, 0xa4, 0x62, - 0xd7, 0xa6, 0x61, 0x11, 0x35, 0x41, 0xc2, 0xa7, 0x7d, 0xcc, 0xb9, 0x6b, 0x3a, 0x96, 0xfb, 0x71, - 0xed, 0xb6, 0xa7, 0x49, 0x89, 0x97, 0x6f, 0x86, 0x5e, 0x11, 0xfc, 0x3c, 0x9e, 0x6a, 0x0b, 0xf3, - 0x30, 0x41, 0x7f, 0xd5, 0x23, 0xe8, 0xd9, 0x58, 0xae, 0xc5, 0xad, 0x66, 0x18, 0xfa, 0x2b, 0x82, - 0xa1, 0xe7, 0x96, 0x3c, 0x2c, 0x42, 0xd1, 0x5b, 0x11, 0x8a, 0x9e, 0x5f, 0x32, 0xcd, 0x18, 0x8e, - 0xde, 0x8a, 0x70, 0xf4, 0xc2, 0x12, 0x90, 0x18, 0x92, 0xfe, 0xaa, 0x47, 0xd2, 0x8b, 0x4b, 0xa6, - 0x3d, 0xc3, 0xd2, 0xef, 0x44, 0x59, 0x3a, 0x67, 0xd8, 0x37, 0x62, 0xad, 0x63, 0x69, 0xfa, 0xf7, - 0x43, 0x34, 0x5d, 0x8a, 0xe5, 0xc8, 0x1c, 0x64, 0x01, 0x4f, 0x6f, 0x45, 0x78, 0x3a, 0x2c, 0xf1, - 0x41, 0x0c, 0x51, 0x7f, 0x3b, 0x4c, 0xd4, 0xcb, 0xb1, 0x5c, 0x5f, 0x2c, 0x9a, 0x45, 0x4c, 0xfd, - 0x75, 0x9f, 0xa9, 0xaf, 0xc4, 0x96, 0x1a, 0x62, 0x0e, 0xb3, 0x54, 0x7d, 0x7f, 0x8e, 0xaa, 0x73, - 0x6a, 0xfd, 0x4c, 0x2c, 0xc4, 0x12, 
0xae, 0xbe, 0x3f, 0xc7, 0xd5, 0xab, 0x4b, 0x00, 0x97, 0x90, - 0xf5, 0x9f, 0x2d, 0x26, 0xeb, 0xf1, 0x74, 0x5a, 0xbc, 0x66, 0x32, 0xb6, 0xae, 0xc5, 0xb0, 0x75, - 0xce, 0xa8, 0x5f, 0x88, 0x85, 0x4f, 0x4c, 0xd7, 0x8f, 0x16, 0xd0, 0x75, 0x4e, 0xac, 0x9f, 0x8b, - 0x05, 0x4f, 0xc0, 0xd7, 0x8f, 0x16, 0xf0, 0x75, 0xb4, 0x14, 0x36, 0x39, 0x61, 0x7f, 0x9e, 0xf2, - 0xa2, 0x99, 0x30, 0x47, 0x73, 0x0b, 0x76, 0x1c, 0xcb, 0x11, 0x5c, 0x98, 0x0f, 0x94, 0xe7, 0x28, - 0x53, 0x0b, 0x42, 0xda, 0x39, 0xe4, 0x9e, 0xe5, 0xf0, 0x50, 0x18, 0x53, 0x7e, 0x9f, 0x0e, 0x6c, - 0x19, 0xb9, 0x09, 0xb3, 0x3c, 0x49, 0xb0, 0xbc, 0x10, 0xe7, 0xcf, 0x44, 0x39, 0xff, 0x3a, 0x94, - 0x69, 0x6e, 0x9e, 0xa1, 0xf3, 0xba, 0xed, 0xd1, 0x79, 0x74, 0x13, 0x56, 0x19, 0xf9, 0xe2, 0x79, - 0x41, 0x24, 0xe4, 0x1c, 0xcb, 0x47, 0x35, 0x7a, 0x83, 0x6f, 0x25, 0x9e, 0x99, 0x5f, 0x82, 0x4b, - 0x21, 0x5d, 0x3f, 0xe7, 0x73, 0x0e, 0x5b, 0xf7, 0xb5, 0xb7, 0x45, 0xf2, 0x7f, 0x2f, 0x70, 0x50, - 0x50, 0x2a, 0x20, 0xc8, 0xf5, 0xad, 0x01, 0x16, 0x19, 0x99, 0x5d, 0xd3, 0xf2, 0x61, 0x6c, 0x0d, - 0x45, 0xde, 0xa5, 0x97, 0x54, 0xcb, 0x8f, 0xd9, 0x12, 0x0f, 0xc9, 0xca, 0x9f, 0xd2, 0x01, 0x5e, - 0x50, 0x3d, 0x2c, 0x22, 0xfa, 0xe9, 0xff, 0x0d, 0xd1, 0xcf, 0x7c, 0x63, 0xa2, 0x1f, 0x66, 0x44, - 0xd9, 0x28, 0x23, 0xfa, 0x57, 0x3a, 0xf8, 0xc2, 0x3e, 0x6d, 0xff, 0x66, 0x1e, 0x09, 0xe8, 0x4d, - 0x9e, 0x7d, 0x2f, 0x41, 0x6f, 0x44, 0x31, 0x56, 0x60, 0xcf, 0x8d, 0x16, 0x63, 0x45, 0x4e, 0x78, - 0xd8, 0x00, 0xbd, 0x06, 0x12, 0xeb, 0xe8, 0x69, 0x96, 0xed, 0x8a, 0xf4, 0xf0, 0x44, 0x78, 0xae, - 0xbc, 0x71, 0xb7, 0x71, 0x40, 0x75, 0xf6, 0x6d, 0x57, 0x2d, 0xd9, 0xe2, 0x2a, 0xc4, 0xdc, 0xa4, - 0x48, 0x01, 0x71, 0x0d, 0x24, 0xfa, 0xf6, 0xae, 0xad, 0xf7, 0x31, 0x0b, 0xf5, 0x92, 0x1a, 0x08, - 0x94, 0xfb, 0x80, 0xe6, 0x93, 0x0d, 0xea, 0x40, 0x01, 0x9f, 0x60, 0x93, 0xd0, 0xaf, 0x96, 0x9d, - 0xa5, 0x23, 0x82, 0x9d, 0x63, 0x93, 0x34, 0x1b, 0xd4, 0xc9, 0xff, 0xfc, 0x72, 0xbd, 0xce, 0xb5, - 0x5f, 0xb4, 0x26, 0x06, 0xc1, 0x13, 0x9b, 0x9c, 0xa9, 0xc2, 0x5e, 0xf9, 
0x5b, 0x86, 0x52, 0xe5, - 0x48, 0x22, 0x5a, 0xe8, 0x5b, 0x6f, 0x03, 0x65, 0x42, 0x65, 0x52, 0x32, 0x7f, 0xaf, 0x01, 0x0c, - 0x75, 0x57, 0x7b, 0xa8, 0x9b, 0x04, 0x0f, 0x84, 0xd3, 0x43, 0x12, 0x24, 0x43, 0x89, 0x8e, 0xa6, - 0x2e, 0x1e, 0x88, 0x8a, 0xcd, 0x1f, 0x87, 0xe6, 0x59, 0xfc, 0x76, 0xf3, 0x8c, 0x7a, 0xb9, 0x34, - 0xe3, 0xe5, 0x10, 0x8d, 0x95, 0xc2, 0x34, 0x96, 0xbe, 0x9b, 0xed, 0x18, 0x96, 0x63, 0x90, 0x33, - 0xf6, 0x69, 0xb2, 0xaa, 0x3f, 0x46, 0x37, 0xa0, 0x32, 0xc1, 0x13, 0xdb, 0xb2, 0xc6, 0x1a, 0x0f, - 0x5e, 0x65, 0x66, 0xba, 0x22, 0x84, 0x6d, 0x16, 0xc3, 0x7e, 0x99, 0x09, 0xb6, 0x5f, 0x50, 0xae, - 0xfc, 0xdf, 0x39, 0x58, 0xf9, 0x35, 0xeb, 0x61, 0x44, 0xa9, 0x06, 0x3a, 0x84, 0x55, 0x7f, 0xfb, - 0x6b, 0x53, 0x16, 0x16, 0xbc, 0x05, 0x9d, 0x34, 0x7e, 0xd4, 0x4f, 0xa2, 0x62, 0x17, 0xfd, 0x08, - 0xae, 0xce, 0x84, 0x36, 0x1f, 0x3a, 0x93, 0x30, 0xc2, 0x3d, 0x16, 0x8d, 0x70, 0x1e, 0x72, 0xe0, - 0xab, 0xec, 0xb7, 0xdc, 0x74, 0x3b, 0xb4, 0x2c, 0x0e, 0x13, 0xa7, 0x85, 0x5f, 0xff, 0x06, 0x54, - 0x1c, 0x4c, 0x74, 0xc3, 0xd4, 0x22, 0x8d, 0x87, 0x15, 0x2e, 0x14, 0xed, 0x8c, 0x03, 0x78, 0x6c, - 0x21, 0x81, 0x42, 0xdf, 0x05, 0x29, 0xe0, 0x5e, 0xe9, 0x98, 0x1a, 0xde, 0xaf, 0x4b, 0x03, 0x5d, - 0xe5, 0x8f, 0xe9, 0x00, 0x32, 0x5a, 0xe9, 0xb6, 0xa1, 0xe0, 0x60, 0x77, 0x3a, 0xe6, 0xb5, 0x67, - 0x75, 0xeb, 0xa5, 0x64, 0xd4, 0x8b, 0x4a, 0xa7, 0x63, 0xa2, 0x0a, 0x63, 0xe5, 0x3e, 0x14, 0xb8, - 0x04, 0x95, 0xa1, 0x78, 0xb4, 0x77, 0x77, 0x6f, 0xff, 0xfd, 0xbd, 0x7a, 0x0a, 0x01, 0x14, 0xb6, - 0x5b, 0xad, 0xf6, 0x41, 0xb7, 0x9e, 0x46, 0x12, 0xe4, 0xb7, 0x9b, 0xfb, 0x6a, 0xb7, 0x9e, 0xa1, - 0x62, 0xb5, 0xfd, 0x6e, 0xbb, 0xd5, 0xad, 0x67, 0xd1, 0x2a, 0x54, 0xf8, 0xb5, 0x76, 0x67, 0x5f, - 0x7d, 0x6f, 0xbb, 0x5b, 0xcf, 0x85, 0x44, 0x87, 0xed, 0xbd, 0xdb, 0x6d, 0xb5, 0x9e, 0x57, 0x5e, - 0xa6, 0xc5, 0x6d, 0x0c, 0x59, 0x0b, 0xca, 0xd8, 0x74, 0xa8, 0x8c, 0x55, 0x7e, 0x9b, 0x01, 0x39, - 0x9e, 0x81, 0xa1, 0x77, 0x67, 0x26, 0xbe, 0x75, 0x01, 0xfa, 0x36, 0x33, 0x7b, 0xf4, 0x34, 0x54, - 0x1d, 0x7c, 
0x8c, 0x49, 0x7f, 0xc4, 0x19, 0x21, 0xcf, 0x98, 0x15, 0xb5, 0x22, 0xa4, 0xcc, 0xc8, - 0xe5, 0x6a, 0x1f, 0xe1, 0x3e, 0xd1, 0x78, 0x28, 0xe2, 0x8b, 0x4e, 0xa2, 0x6a, 0x54, 0x7a, 0xc8, - 0x85, 0xca, 0x87, 0x17, 0xf2, 0xa5, 0x04, 0x79, 0xb5, 0xdd, 0x55, 0x7f, 0x5c, 0xcf, 0x22, 0x04, - 0x55, 0x76, 0xa9, 0x1d, 0xee, 0x6d, 0x1f, 0x1c, 0x76, 0xf6, 0xa9, 0x2f, 0x2f, 0x41, 0xcd, 0xf3, - 0xa5, 0x27, 0xcc, 0x2b, 0x07, 0x70, 0x35, 0x86, 0x3e, 0x7e, 0xc3, 0x52, 0x5e, 0xf9, 0x5d, 0x3a, - 0x0c, 0x19, 0xad, 0xd1, 0xdf, 0x99, 0xf1, 0xf4, 0x66, 0x52, 0xd2, 0x39, 0xeb, 0x66, 0x19, 0x4a, - 0x58, 0x34, 0xa8, 0x98, 0x83, 0x57, 0x54, 0x7f, 0xac, 0xbc, 0xb4, 0xdc, 0x69, 0xc1, 0xaa, 0xcb, - 0x28, 0xff, 0x49, 0x43, 0x6d, 0x26, 0x44, 0xa0, 0x2d, 0xc8, 0xf3, 0xba, 0x2a, 0xee, 0x67, 0x19, - 0x8b, 0x70, 0x22, 0x9e, 0x70, 0x55, 0xf4, 0x66, 0xe4, 0x95, 0xe6, 0x42, 0x11, 0x77, 0x96, 0xd7, - 0x55, 0x13, 0xa6, 0xbe, 0x05, 0x7a, 0x0b, 0x24, 0x3f, 0xd6, 0x89, 0x62, 0xfe, 0xa9, 0x79, 0x73, - 0x3f, 0x4a, 0x0a, 0xfb, 0xc0, 0x06, 0xbd, 0x1e, 0xd0, 0xdd, 0xdc, 0x7c, 0x35, 0x27, 0xcc, 0xb9, - 0x82, 0x30, 0xf6, 0xf4, 0x95, 0x16, 0x94, 0x43, 0xf3, 0x41, 0x4f, 0x80, 0x34, 0xd1, 0x4f, 0x45, - 0x2f, 0x96, 0x77, 0xd3, 0x4a, 0x13, 0xfd, 0x94, 0xb7, 0x61, 0xaf, 0x42, 0x91, 0xde, 0x1c, 0xea, - 0xae, 0x68, 0xd0, 0x14, 0x26, 0xfa, 0xe9, 0x3b, 0xba, 0xab, 0x7c, 0x00, 0xd5, 0x68, 0x1f, 0x92, - 0xee, 0x45, 0xc7, 0x9a, 0x9a, 0x03, 0x86, 0x91, 0x57, 0xf9, 0x00, 0xdd, 0x82, 0xfc, 0x89, 0xc5, - 0xc3, 0xf5, 0xe2, 0xa0, 0x75, 0xcf, 0x22, 0x38, 0xd4, 0xc7, 0xe4, 0xda, 0xca, 0x23, 0xc8, 0xb3, - 0xf0, 0x4b, 0x43, 0x29, 0xeb, 0x28, 0x0a, 0xaa, 0x4f, 0xaf, 0xd1, 0x07, 0x00, 0x3a, 0x21, 0x8e, - 0xd1, 0x9b, 0x06, 0xc0, 0xeb, 0x8b, 0xc3, 0xf7, 0xb6, 0xa7, 0xd7, 0xbc, 0x26, 0xe2, 0xf8, 0xe5, - 0xc0, 0x34, 0x14, 0xcb, 0x43, 0x80, 0xca, 0x1e, 0x54, 0xa3, 0xb6, 0xe1, 0xde, 0xfe, 0xca, 0x82, - 0xde, 0xbe, 0x4f, 0x27, 0x7d, 0x32, 0x9a, 0xe5, 0xdd, 0x63, 0x36, 0x50, 0x3e, 0x49, 0x43, 0xa9, - 0x7b, 0x2a, 0xd6, 0x68, 0x4c, 0xe3, 0x32, 0x30, 
0xcd, 0x84, 0xdb, 0x74, 0xbc, 0x13, 0x9a, 0xf5, - 0xfb, 0xab, 0x6f, 0xfb, 0x1b, 0x2a, 0x97, 0xb4, 0x8b, 0xe0, 0xb5, 0xbf, 0x44, 0xb8, 0x7e, 0x03, - 0x24, 0x7f, 0x55, 0xd1, 0x9a, 0x49, 0x1f, 0x0c, 0x1c, 0xec, 0xba, 0x62, 0x6e, 0xde, 0x90, 0xf5, - 0xc1, 0xad, 0x87, 0xa2, 0x11, 0x98, 0x55, 0xf9, 0x40, 0x19, 0x40, 0x6d, 0x26, 0x71, 0xa3, 0x37, - 0xa0, 0x68, 0x4f, 0x7b, 0x9a, 0xe7, 0x9e, 0x99, 0xcd, 0xe3, 0xf1, 0xe7, 0x69, 0x6f, 0x6c, 0xf4, - 0xef, 0xe2, 0x33, 0xef, 0x65, 0xec, 0x69, 0xef, 0x2e, 0xf7, 0x22, 0x7f, 0x4a, 0x26, 0xfc, 0x94, - 0x13, 0x28, 0x79, 0x8b, 0x02, 0xfd, 0x20, 0xbc, 0x4f, 0xbc, 0xbf, 0x23, 0xb1, 0x64, 0x42, 0xc0, - 0x87, 0xb6, 0xc9, 0x4d, 0x58, 0x75, 0x8d, 0xa1, 0x89, 0x07, 0x5a, 0x50, 0xb5, 0xb1, 0xa7, 0x95, - 0xd4, 0x1a, 0xbf, 0xb1, 0xeb, 0x95, 0x6c, 0xca, 0xbf, 0xd3, 0x50, 0xf2, 0x36, 0x2c, 0x7a, 0x39, - 0xb4, 0xee, 0xaa, 0x0b, 0x3a, 0x66, 0x9e, 0x62, 0xd0, 0xca, 0x8e, 0xbe, 0x6b, 0xe6, 0xe2, 0xef, - 0x1a, 0xf7, 0x4f, 0xc2, 0xfb, 0x39, 0x94, 0xbb, 0xf0, 0xcf, 0xa1, 0x17, 0x01, 0x11, 0x8b, 0xe8, - 0x63, 0xed, 0xc4, 0x22, 0x86, 0x39, 0xd4, 0xb8, 0xb3, 0x39, 0xa7, 0xac, 0xb3, 0x3b, 0xf7, 0xd8, - 0x8d, 0x03, 0xe6, 0xf7, 0x9f, 0xa7, 0xa1, 0xe4, 0xb3, 0x83, 0x8b, 0x76, 0xa6, 0xaf, 0x40, 0x41, - 0x24, 0x40, 0xde, 0x9a, 0x16, 0x23, 0xff, 0x27, 0x49, 0x2e, 0xf4, 0x93, 0x44, 0x86, 0xd2, 0x04, - 0x13, 0x9d, 0xe5, 0x19, 0x5e, 0x38, 0xfb, 0xe3, 0x9b, 0xaf, 0x43, 0x39, 0xf4, 0x93, 0x80, 0xee, - 0xbc, 0xbd, 0xf6, 0xfb, 0xf5, 0x94, 0x5c, 0xfc, 0xe4, 0xb3, 0xeb, 0xd9, 0x3d, 0xfc, 0x90, 0xae, - 0x59, 0xb5, 0xdd, 0xea, 0xb4, 0x5b, 0x77, 0xeb, 0x69, 0xb9, 0xfc, 0xc9, 0x67, 0xd7, 0x8b, 0x2a, - 0x66, 0x8d, 0xb6, 0x9b, 0x1d, 0x58, 0x09, 0x7f, 0x95, 0x68, 0x3a, 0x40, 0x50, 0xbd, 0x7d, 0x74, - 0xb0, 0xbb, 0xd3, 0xda, 0xee, 0xb6, 0xb5, 0x7b, 0xfb, 0xdd, 0x76, 0x3d, 0x8d, 0xae, 0xc2, 0xa5, - 0xdd, 0x9d, 0x77, 0x3a, 0x5d, 0xad, 0xb5, 0xbb, 0xd3, 0xde, 0xeb, 0x6a, 0xdb, 0xdd, 0xee, 0x76, - 0xeb, 0x6e, 0x3d, 0xb3, 0xf5, 0x97, 0x32, 0xd4, 0xb6, 0x9b, 0xad, 0x1d, 0x9a, 0xff, 
0x8d, 0xbe, - 0x2e, 0x1a, 0x99, 0x39, 0xd6, 0xb7, 0x38, 0xf7, 0x24, 0x85, 0x7c, 0x7e, 0x1f, 0x17, 0xdd, 0x81, - 0x3c, 0x6b, 0x69, 0xa0, 0xf3, 0x8f, 0x56, 0xc8, 0x4b, 0x1a, 0xbb, 0xf4, 0x65, 0xd8, 0xf6, 0x38, - 0xf7, 0xac, 0x85, 0x7c, 0x7e, 0x9f, 0x17, 0xa9, 0x20, 0x05, 0x3d, 0x89, 0xe5, 0x67, 0x2f, 0xe4, - 0x04, 0xbd, 0x5f, 0x8a, 0x19, 0x14, 0x46, 0xcb, 0xcf, 0x22, 0xc8, 0x09, 0x02, 0x18, 0xda, 0x85, - 0xa2, 0x57, 0xcb, 0x2e, 0x3b, 0x1d, 0x21, 0x2f, 0xed, 0xcb, 0xd2, 0x4f, 0xc0, 0x7b, 0x0e, 0xe7, - 0x1f, 0xf5, 0x90, 0x97, 0x34, 0x99, 0xd1, 0x0e, 0x14, 0x04, 0xdb, 0x5f, 0x72, 0xe2, 0x41, 0x5e, - 0xd6, 0x67, 0xa5, 0x4e, 0x0b, 0x9a, 0x39, 0xcb, 0x0f, 0xb0, 0xc8, 0x09, 0xfa, 0xe7, 0xe8, 0x08, - 0x20, 0xd4, 0x61, 0x48, 0x70, 0x32, 0x45, 0x4e, 0xd2, 0x17, 0x47, 0xfb, 0x50, 0xf2, 0x0b, 0xbe, - 0xa5, 0xe7, 0x44, 0xe4, 0xe5, 0x0d, 0x6a, 0x74, 0x1f, 0x2a, 0xd1, 0x4a, 0x27, 0xd9, 0xe9, 0x0f, - 0x39, 0x61, 0xe7, 0x99, 0xe2, 0x47, 0xcb, 0x9e, 0x64, 0xa7, 0x41, 0xe4, 0x84, 0x8d, 0x68, 0xf4, - 0x11, 0xac, 0xce, 0x97, 0x25, 0xc9, 0x0f, 0x87, 0xc8, 0x17, 0x68, 0x4d, 0xa3, 0x09, 0xa0, 0x05, - 0xe5, 0xcc, 0x05, 0xce, 0x8a, 0xc8, 0x17, 0xe9, 0x54, 0xa3, 0x01, 0xd4, 0x66, 0x6b, 0x84, 0xa4, - 0x67, 0x47, 0xe4, 0xc4, 0x5d, 0x6b, 0xfe, 0x94, 0x68, 0xd9, 0x90, 0xf4, 0x2c, 0x89, 0x9c, 0xb8, - 0x89, 0xdd, 0x6c, 0x7f, 0xfe, 0xd5, 0x5a, 0xfa, 0x8b, 0xaf, 0xd6, 0xd2, 0xff, 0xf8, 0x6a, 0x2d, - 0xfd, 0xe9, 0xd7, 0x6b, 0xa9, 0x2f, 0xbe, 0x5e, 0x4b, 0xfd, 0xf5, 0xeb, 0xb5, 0xd4, 0x4f, 0x5e, - 0x18, 0x1a, 0x64, 0x34, 0xed, 0x6d, 0xf4, 0xad, 0xc9, 0x66, 0xf8, 0x50, 0xde, 0xa2, 0x83, 0x82, - 0xbd, 0x02, 0x4b, 0xba, 0xaf, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xc2, 0x6e, 0x96, 0x48, - 0x28, 0x00, 0x00, + 0xf1, 0xe7, 0x9b, 0x44, 0x53, 0x7c, 0x68, 0x76, 0xbd, 0x4b, 0xc3, 0x6b, 0x69, 0x8d, 0x2d, 0xbf, + 0xd6, 0xb6, 0xf4, 0xb7, 0x5c, 0xeb, 0xbf, 0x1d, 0x3b, 0xb1, 0x45, 0x2e, 0xd7, 0x94, 0x57, 0x96, + 0x14, 0x88, 0x5a, 0xe7, 0xe5, 0x85, 0x41, 0x72, 0x44, 0xc2, 0x4b, 0x02, 0x30, 0x30, 0x94, 0xa5, + 0x3d, 
0xa6, 0x92, 0x4a, 0x95, 0x73, 0x71, 0x55, 0x2e, 0xb9, 0xf8, 0x23, 0xe4, 0x9e, 0x4b, 0x72, + 0xc9, 0xc5, 0x55, 0x39, 0xc4, 0xc7, 0x1c, 0x52, 0x4e, 0xca, 0xce, 0x29, 0x5f, 0x20, 0xa7, 0x54, + 0x52, 0xf3, 0xc0, 0x8b, 0x24, 0x44, 0x68, 0x9d, 0x5b, 0x6e, 0x98, 0x46, 0x77, 0x03, 0xd3, 0x98, + 0xf9, 0x75, 0xff, 0x1a, 0x03, 0x4f, 0x10, 0x6c, 0x0e, 0xb0, 0x33, 0x31, 0x4c, 0xb2, 0xa9, 0xf7, + 0xfa, 0xc6, 0x26, 0x39, 0xb3, 0xb1, 0xbb, 0x61, 0x3b, 0x16, 0xb1, 0x50, 0x2d, 0xb8, 0xb9, 0x41, + 0x6f, 0xca, 0x4f, 0x86, 0xb4, 0xfb, 0xce, 0x99, 0x4d, 0xac, 0x4d, 0xdb, 0xb1, 0xac, 0x63, 0xae, + 0x2f, 0x5f, 0x0b, 0xdd, 0x66, 0x7e, 0xc2, 0xde, 0x22, 0x77, 0x85, 0xf1, 0x03, 0x7c, 0xe6, 0xdd, + 0x7d, 0x72, 0xce, 0xd6, 0xd6, 0x1d, 0x7d, 0xe2, 0xdd, 0x5e, 0x1f, 0x5a, 0xd6, 0x70, 0x8c, 0x37, + 0xd9, 0xa8, 0x37, 0x3d, 0xde, 0x24, 0xc6, 0x04, 0xbb, 0x44, 0x9f, 0xd8, 0x42, 0xe1, 0xf2, 0xd0, + 0x1a, 0x5a, 0xec, 0x72, 0x93, 0x5e, 0x71, 0xa9, 0xf2, 0x3b, 0x09, 0x8a, 0x2a, 0xfe, 0x78, 0x8a, + 0x5d, 0x82, 0xb6, 0x20, 0x87, 0xfb, 0x23, 0xab, 0x91, 0xbe, 0x9e, 0x7e, 0xae, 0xbc, 0x75, 0x6d, + 0x63, 0x66, 0x72, 0x1b, 0x42, 0xaf, 0xdd, 0x1f, 0x59, 0x9d, 0x94, 0xca, 0x74, 0xd1, 0x2d, 0xc8, + 0x1f, 0x8f, 0xa7, 0xee, 0xa8, 0x91, 0x61, 0x46, 0x4f, 0xc6, 0x19, 0xdd, 0xa1, 0x4a, 0x9d, 0x94, + 0xca, 0xb5, 0xe9, 0xa3, 0x0c, 0xf3, 0xd8, 0x6a, 0x64, 0xcf, 0x7f, 0xd4, 0x8e, 0x79, 0xcc, 0x1e, + 0x45, 0x75, 0x51, 0x13, 0xc0, 0xc5, 0x44, 0xb3, 0x6c, 0x62, 0x58, 0x66, 0x23, 0xc7, 0x2c, 0x9f, + 0x8a, 0xb3, 0x3c, 0xc4, 0x64, 0x9f, 0x29, 0x76, 0x52, 0xaa, 0xe4, 0x7a, 0x03, 0xea, 0xc3, 0x30, + 0x0d, 0xa2, 0xf5, 0x47, 0xba, 0x61, 0x36, 0xf2, 0xe7, 0xfb, 0xd8, 0x31, 0x0d, 0xd2, 0xa2, 0x8a, + 0xd4, 0x87, 0xe1, 0x0d, 0xe8, 0x94, 0x3f, 0x9e, 0x62, 0xe7, 0xac, 0x51, 0x38, 0x7f, 0xca, 0xdf, + 0xa7, 0x4a, 0x74, 0xca, 0x4c, 0x1b, 0xb5, 0xa1, 0xdc, 0xc3, 0x43, 0xc3, 0xd4, 0x7a, 0x63, 0xab, + 0xff, 0xa0, 0x51, 0x64, 0xc6, 0x4a, 0x9c, 0x71, 0x93, 0xaa, 0x36, 0xa9, 0x66, 0x27, 0xa5, 0x42, + 0xcf, 0x1f, 0xa1, 0x37, 0xa1, 0xd4, 0x1f, 
0xe1, 0xfe, 0x03, 0x8d, 0x9c, 0x36, 0x4a, 0xcc, 0xc7, + 0x7a, 0x9c, 0x8f, 0x16, 0xd5, 0xeb, 0x9e, 0x76, 0x52, 0x6a, 0xb1, 0xcf, 0x2f, 0xe9, 0xfc, 0x07, + 0x78, 0x6c, 0x9c, 0x60, 0x87, 0xda, 0x4b, 0xe7, 0xcf, 0xff, 0x36, 0xd7, 0x64, 0x1e, 0xa4, 0x81, + 0x37, 0x40, 0x6f, 0x81, 0x84, 0xcd, 0x81, 0x98, 0x06, 0x30, 0x17, 0xd7, 0x63, 0xd7, 0x8a, 0x39, + 0xf0, 0x26, 0x51, 0xc2, 0xe2, 0x1a, 0xbd, 0x06, 0x85, 0xbe, 0x35, 0x99, 0x18, 0xa4, 0x51, 0x66, + 0xd6, 0x6b, 0xb1, 0x13, 0x60, 0x5a, 0x9d, 0x94, 0x2a, 0xf4, 0xd1, 0x1e, 0x54, 0xc7, 0x86, 0x4b, + 0x34, 0xd7, 0xd4, 0x6d, 0x77, 0x64, 0x11, 0xb7, 0xb1, 0xc2, 0x3c, 0x3c, 0x1d, 0xe7, 0x61, 0xd7, + 0x70, 0xc9, 0xa1, 0xa7, 0xdc, 0x49, 0xa9, 0x95, 0x71, 0x58, 0x40, 0xfd, 0x59, 0xc7, 0xc7, 0xd8, + 0xf1, 0x1d, 0x36, 0x2a, 0xe7, 0xfb, 0xdb, 0xa7, 0xda, 0x9e, 0x3d, 0xf5, 0x67, 0x85, 0x05, 0xe8, + 0xc7, 0x70, 0x69, 0x6c, 0xe9, 0x03, 0xdf, 0x9d, 0xd6, 0x1f, 0x4d, 0xcd, 0x07, 0x8d, 0x2a, 0x73, + 0xfa, 0x7c, 0xec, 0x4b, 0x5a, 0xfa, 0xc0, 0x73, 0xd1, 0xa2, 0x06, 0x9d, 0x94, 0xba, 0x3a, 0x9e, + 0x15, 0xa2, 0xfb, 0x70, 0x59, 0xb7, 0xed, 0xf1, 0xd9, 0xac, 0xf7, 0x1a, 0xf3, 0x7e, 0x33, 0xce, + 0xfb, 0x36, 0xb5, 0x99, 0x75, 0x8f, 0xf4, 0x39, 0x29, 0xea, 0x42, 0xdd, 0x76, 0xb0, 0xad, 0x3b, + 0x58, 0xb3, 0x1d, 0xcb, 0xb6, 0x5c, 0x7d, 0xdc, 0xa8, 0x33, 0xdf, 0xcf, 0xc6, 0xf9, 0x3e, 0xe0, + 0xfa, 0x07, 0x42, 0xbd, 0x93, 0x52, 0x6b, 0x76, 0x54, 0xc4, 0xbd, 0x5a, 0x7d, 0xec, 0xba, 0x81, + 0xd7, 0xd5, 0x65, 0x5e, 0x99, 0x7e, 0xd4, 0x6b, 0x44, 0xd4, 0x2c, 0x42, 0xfe, 0x44, 0x1f, 0x4f, + 0xb1, 0xf2, 0x2c, 0x94, 0x43, 0xb0, 0x84, 0x1a, 0x50, 0x9c, 0x60, 0xd7, 0xd5, 0x87, 0x98, 0xa1, + 0x98, 0xa4, 0x7a, 0x43, 0xa5, 0x0a, 0x2b, 0x61, 0x28, 0x52, 0x26, 0xbe, 0x21, 0x05, 0x19, 0x6a, + 0x78, 0x82, 0x1d, 0x97, 0x22, 0x8b, 0x30, 0x14, 0x43, 0x74, 0x03, 0x2a, 0x6c, 0xa9, 0x6b, 0xde, + 0x7d, 0x8a, 0x74, 0x39, 0x75, 0x85, 0x09, 0xef, 0x09, 0xa5, 0x75, 0x28, 0xdb, 0x5b, 0xb6, 0xaf, + 0x92, 0x65, 0x2a, 0x60, 0x6f, 0xd9, 0x42, 0x41, 0xf9, 0x0e, 0xd4, 0x67, 0x91, 
0x09, 0xd5, 0x21, + 0xfb, 0x00, 0x9f, 0x89, 0xe7, 0xd1, 0x4b, 0x74, 0x59, 0x4c, 0x8b, 0x3d, 0x43, 0x52, 0xc5, 0x1c, + 0xff, 0x98, 0xf1, 0x8d, 0x7d, 0x48, 0x42, 0xaf, 0x41, 0x8e, 0x22, 0xbc, 0x00, 0x6b, 0x79, 0x83, + 0xc3, 0xff, 0x86, 0x07, 0xff, 0x1b, 0x5d, 0x0f, 0xfe, 0x9b, 0xa5, 0x2f, 0xbe, 0x5a, 0x4f, 0x7d, + 0xf6, 0xd7, 0xf5, 0xb4, 0xca, 0x2c, 0xd0, 0xe3, 0x14, 0x41, 0x74, 0xc3, 0xd4, 0x8c, 0x81, 0x78, + 0x4e, 0x91, 0x8d, 0x77, 0x06, 0xe8, 0x2e, 0xd4, 0xfb, 0x96, 0xe9, 0x62, 0xd3, 0x9d, 0xba, 0x1a, + 0x4f, 0x2f, 0x02, 0xa2, 0xe7, 0x77, 0x78, 0xcb, 0x53, 0x3c, 0x60, 0x7a, 0x6a, 0xad, 0x1f, 0x15, + 0xa0, 0x3b, 0x00, 0x27, 0xfa, 0xd8, 0x18, 0xe8, 0xc4, 0x72, 0xdc, 0x46, 0xee, 0x7a, 0x76, 0xa1, + 0x9b, 0x7b, 0x9e, 0xca, 0x91, 0x3d, 0xd0, 0x09, 0x6e, 0xe6, 0xe8, 0xdb, 0xaa, 0x21, 0x4b, 0xf4, + 0x0c, 0xd4, 0x74, 0xdb, 0xd6, 0x5c, 0xa2, 0x13, 0xac, 0xf5, 0xce, 0x08, 0x76, 0x19, 0x70, 0xaf, + 0xa8, 0x15, 0xdd, 0xb6, 0x0f, 0xa9, 0xb4, 0x49, 0x85, 0xe8, 0x69, 0xa8, 0x52, 0x90, 0x36, 0xf4, + 0xb1, 0x36, 0xc2, 0xc6, 0x70, 0x44, 0x18, 0x40, 0x67, 0xd5, 0x8a, 0x90, 0x76, 0x98, 0x50, 0x19, + 0xf8, 0x0b, 0x81, 0x01, 0x34, 0x42, 0x90, 0x1b, 0xe8, 0x44, 0x67, 0x81, 0x5c, 0x51, 0xd9, 0x35, + 0x95, 0xd9, 0x3a, 0x19, 0x89, 0xf0, 0xb0, 0x6b, 0x74, 0x05, 0x0a, 0xc2, 0x6d, 0x96, 0xb9, 0x15, + 0x23, 0xfa, 0xcd, 0x6c, 0xc7, 0x3a, 0xc1, 0x2c, 0x23, 0x95, 0x54, 0x3e, 0x50, 0x7e, 0x96, 0x81, + 0xd5, 0x39, 0x28, 0xa7, 0x7e, 0x47, 0xba, 0x3b, 0xf2, 0x9e, 0x45, 0xaf, 0xd1, 0xab, 0xd4, 0xaf, + 0x3e, 0xc0, 0x8e, 0x48, 0xa1, 0x8d, 0x70, 0x88, 0x78, 0x79, 0xd0, 0x61, 0xf7, 0x45, 0x68, 0x84, + 0x36, 0xda, 0x87, 0xfa, 0x58, 0x77, 0x89, 0xc6, 0xa1, 0x51, 0x0b, 0xa5, 0xd3, 0xf9, 0x84, 0xb0, + 0xab, 0x7b, 0x60, 0x4a, 0x17, 0xbb, 0x70, 0x54, 0x1d, 0x47, 0xa4, 0x48, 0x85, 0xcb, 0xbd, 0xb3, + 0x87, 0xba, 0x49, 0x0c, 0x13, 0x6b, 0x73, 0x5f, 0xee, 0xf1, 0x39, 0xa7, 0xed, 0x13, 0x63, 0x80, + 0xcd, 0xbe, 0xf7, 0xc9, 0x2e, 0xf9, 0xc6, 0xfe, 0x27, 0x75, 0x15, 0x15, 0xaa, 0xd1, 0x64, 0x84, + 0xaa, 0x90, 0x21, 
0xa7, 0x22, 0x00, 0x19, 0x72, 0x8a, 0xfe, 0x0f, 0x72, 0x74, 0x92, 0x6c, 0xf2, + 0xd5, 0x05, 0x95, 0x80, 0xb0, 0xeb, 0x9e, 0xd9, 0x58, 0x65, 0x9a, 0x8a, 0xe2, 0xef, 0x06, 0x3f, + 0x41, 0xcd, 0x7a, 0x55, 0x9e, 0x87, 0xda, 0x4c, 0x06, 0x0a, 0x7d, 0xbf, 0x74, 0xf8, 0xfb, 0x29, + 0x35, 0xa8, 0x44, 0xd2, 0x8d, 0x72, 0x05, 0x2e, 0x2f, 0xca, 0x1e, 0xca, 0xc8, 0x97, 0x47, 0xb2, + 0x00, 0xba, 0x05, 0x25, 0x3f, 0x7d, 0xf0, 0xdd, 0x38, 0x1f, 0x2b, 0x4f, 0x59, 0xf5, 0x55, 0xe9, + 0x36, 0xa4, 0xcb, 0x9a, 0xad, 0x87, 0x0c, 0x7b, 0xf1, 0xa2, 0x6e, 0xdb, 0x1d, 0xdd, 0x1d, 0x29, + 0x1f, 0x42, 0x23, 0x2e, 0x35, 0xcc, 0x4c, 0x23, 0xe7, 0x2f, 0xc3, 0x2b, 0x50, 0x38, 0xb6, 0x9c, + 0x89, 0x4e, 0x98, 0xb3, 0x8a, 0x2a, 0x46, 0x74, 0x79, 0xf2, 0x34, 0x91, 0x65, 0x62, 0x3e, 0x50, + 0x34, 0x78, 0x3c, 0x36, 0x3d, 0x50, 0x13, 0xc3, 0x1c, 0x60, 0x1e, 0xcf, 0x8a, 0xca, 0x07, 0x81, + 0x23, 0xfe, 0xb2, 0x7c, 0x40, 0x1f, 0xeb, 0xb2, 0xb9, 0x32, 0xff, 0x92, 0x2a, 0x46, 0xca, 0xdf, + 0xd3, 0x70, 0x65, 0x71, 0x92, 0x40, 0xb7, 0x00, 0x38, 0xa0, 0xfa, 0xdb, 0xae, 0xbc, 0x75, 0x65, + 0x7e, 0xd1, 0xdf, 0xd6, 0x89, 0xae, 0x4a, 0x4c, 0x93, 0x5e, 0x52, 0x18, 0x08, 0xcc, 0x34, 0xd7, + 0x78, 0xc8, 0xd7, 0x4c, 0x56, 0xad, 0xf8, 0x3a, 0x87, 0xc6, 0xc3, 0x28, 0xbc, 0x65, 0xa3, 0xf0, + 0x16, 0xc4, 0x2e, 0x17, 0xd9, 0xc2, 0x1e, 0x96, 0xe6, 0x2f, 0x8a, 0xa5, 0xca, 0x2f, 0xc2, 0xd3, + 0x8c, 0xa4, 0xa8, 0xd0, 0xbe, 0x4e, 0x5f, 0x68, 0x5f, 0x47, 0xc3, 0x93, 0x49, 0x18, 0x1e, 0xe5, + 0x57, 0x00, 0x25, 0x15, 0xbb, 0x36, 0x05, 0x61, 0xd4, 0x04, 0x09, 0x9f, 0xf6, 0x31, 0xaf, 0x94, + 0xd3, 0xb1, 0x95, 0x26, 0xd7, 0x6e, 0x7b, 0x9a, 0xb4, 0xcc, 0xf3, 0xcd, 0xd0, 0x2b, 0x82, 0x0d, + 0xc4, 0x17, 0xf6, 0xc2, 0x3c, 0x4c, 0x07, 0x5e, 0xf5, 0xe8, 0x40, 0x36, 0xb6, 0xb2, 0xe3, 0x56, + 0x33, 0x7c, 0xe0, 0x15, 0xc1, 0x07, 0x72, 0x4b, 0x1e, 0x16, 0x21, 0x04, 0xad, 0x08, 0x21, 0xc8, + 0x2f, 0x99, 0x66, 0x0c, 0x23, 0x68, 0x45, 0x18, 0x41, 0x61, 0x89, 0x93, 0x18, 0x4a, 0xf0, 0xaa, + 0x47, 0x09, 0x8a, 0x4b, 0xa6, 0x3d, 0xc3, 0x09, 0xee, 
0x44, 0x39, 0x01, 0xaf, 0xe7, 0x6f, 0xc4, + 0x5a, 0xc7, 0x92, 0x82, 0xef, 0x86, 0x48, 0x81, 0x14, 0x5b, 0x91, 0x73, 0x27, 0x0b, 0x58, 0x41, + 0x2b, 0xc2, 0x0a, 0x60, 0x49, 0x0c, 0x62, 0x68, 0xc1, 0xdb, 0x61, 0x5a, 0x50, 0x8e, 0x65, 0x16, + 0x62, 0xd1, 0x2c, 0xe2, 0x05, 0xaf, 0xfb, 0xbc, 0x60, 0x25, 0x96, 0xd8, 0x88, 0x39, 0xcc, 0x12, + 0x83, 0xfd, 0x39, 0x62, 0xc0, 0x0b, 0xf9, 0x67, 0x62, 0x5d, 0x2c, 0x61, 0x06, 0xfb, 0x73, 0xcc, + 0xa0, 0xba, 0xc4, 0xe1, 0x12, 0x6a, 0xf0, 0x93, 0xc5, 0xd4, 0x20, 0xbe, 0x78, 0x17, 0xaf, 0x99, + 0x8c, 0x1b, 0x68, 0x31, 0xdc, 0x80, 0xd7, 0xef, 0x2f, 0xc4, 0xba, 0x4f, 0x4c, 0x0e, 0x8e, 0x16, + 0x90, 0x03, 0x5e, 0xc6, 0x3f, 0x17, 0xeb, 0x3c, 0x01, 0x3b, 0x38, 0x5a, 0xc0, 0x0e, 0xd0, 0x52, + 0xb7, 0xc9, 0xe9, 0xc1, 0xf3, 0xb4, 0x0a, 0x9b, 0x81, 0x39, 0x9a, 0xc9, 0xb0, 0xe3, 0x58, 0x8e, + 0xa8, 0xbc, 0xf9, 0x40, 0x79, 0x8e, 0xd6, 0x85, 0x01, 0xa4, 0x9d, 0x43, 0x25, 0x58, 0xc5, 0x10, + 0x82, 0x31, 0xe5, 0xb7, 0xe9, 0xc0, 0x96, 0x95, 0x52, 0xe1, 0x9a, 0x52, 0x12, 0x35, 0x65, 0x88, + 0x61, 0x64, 0xa2, 0x0c, 0x63, 0x1d, 0xca, 0xb4, 0x12, 0x98, 0x21, 0x0f, 0xba, 0xed, 0x91, 0x07, + 0x74, 0x13, 0x56, 0x59, 0xa9, 0xc7, 0xf3, 0x42, 0x24, 0x85, 0xd5, 0xe8, 0x0d, 0xbe, 0x95, 0x78, + 0x2e, 0x7b, 0x09, 0x2e, 0x85, 0x74, 0xfd, 0x0a, 0x83, 0x57, 0xcc, 0x75, 0x5f, 0x7b, 0x5b, 0x94, + 0x1a, 0xef, 0x05, 0x01, 0x0a, 0x88, 0x09, 0x82, 0x5c, 0xdf, 0x1a, 0x60, 0x91, 0xff, 0xd9, 0x35, + 0x25, 0x2b, 0x63, 0x6b, 0x28, 0x32, 0x2a, 0xbd, 0xa4, 0x5a, 0x3e, 0x66, 0x4b, 0x1c, 0x92, 0x95, + 0x3f, 0xa4, 0x03, 0x7f, 0x01, 0x57, 0x59, 0x44, 0x2b, 0xd2, 0xff, 0x1d, 0x5a, 0x91, 0x79, 0x64, + 0x5a, 0x11, 0xae, 0xbf, 0xb2, 0xd1, 0xfa, 0xeb, 0x9f, 0xe9, 0xe0, 0x0b, 0xfb, 0x24, 0xe1, 0xd1, + 0x22, 0x12, 0x14, 0x53, 0x79, 0xf6, 0xbd, 0x44, 0x31, 0x25, 0xa8, 0x5f, 0x81, 0x3d, 0x37, 0x4a, + 0xfd, 0x8a, 0xbc, 0xbc, 0x62, 0x03, 0xf4, 0x1a, 0x48, 0xac, 0x7f, 0xa8, 0x59, 0xb6, 0x2b, 0xd2, + 0xc3, 0x13, 0xe1, 0xb9, 0xf2, 0x36, 0xe1, 0xc6, 0x01, 0xd5, 0xd9, 0xb7, 0x5d, 0xb5, 0x64, 
0x8b, + 0xab, 0x50, 0xad, 0x23, 0x45, 0x6a, 0x9d, 0x6b, 0x20, 0xd1, 0xb7, 0x77, 0x6d, 0xbd, 0x8f, 0x19, + 0xd4, 0x4b, 0x6a, 0x20, 0x50, 0xee, 0x03, 0x9a, 0x4f, 0x36, 0xa8, 0x03, 0x05, 0x7c, 0x82, 0x4d, + 0x42, 0xbf, 0x5a, 0x76, 0xb6, 0x1c, 0x11, 0x5c, 0x00, 0x9b, 0xa4, 0xd9, 0xa0, 0x41, 0xfe, 0xc7, + 0x57, 0xeb, 0x75, 0xae, 0xfd, 0xa2, 0x35, 0x31, 0x08, 0x9e, 0xd8, 0xe4, 0x4c, 0x15, 0xf6, 0xca, + 0x5f, 0x32, 0xb4, 0x30, 0x8f, 0x24, 0xa2, 0x85, 0xb1, 0xf5, 0x36, 0x50, 0x26, 0x44, 0xca, 0x92, + 0xc5, 0x7b, 0x0d, 0x60, 0xa8, 0xbb, 0xda, 0x27, 0xba, 0x49, 0xf0, 0x40, 0x04, 0x3d, 0x24, 0x41, + 0x32, 0x94, 0xe8, 0x68, 0xea, 0xe2, 0x81, 0xe0, 0x87, 0xfe, 0x38, 0x34, 0xcf, 0xe2, 0xb7, 0x9b, + 0x67, 0x34, 0xca, 0xa5, 0x99, 0x28, 0x87, 0x8a, 0x66, 0x29, 0x5c, 0x34, 0xd3, 0x77, 0xb3, 0x1d, + 0xc3, 0x72, 0x0c, 0x72, 0xc6, 0x3e, 0x4d, 0x56, 0xf5, 0xc7, 0xe8, 0x06, 0x54, 0x26, 0x78, 0x62, + 0x5b, 0xd6, 0x58, 0xe3, 0xe0, 0x55, 0x66, 0xa6, 0x2b, 0x42, 0xd8, 0x66, 0x18, 0xf6, 0xf3, 0x4c, + 0xb0, 0xfd, 0x02, 0x72, 0xf4, 0x3f, 0x17, 0x60, 0xe5, 0x97, 0xac, 0x63, 0x12, 0x2d, 0x35, 0xd0, + 0x21, 0xac, 0xfa, 0xdb, 0x5f, 0x9b, 0x32, 0x58, 0xf0, 0x16, 0x74, 0x52, 0xfc, 0xa8, 0x9f, 0x44, + 0xc5, 0x2e, 0xfa, 0x01, 0x5c, 0x9d, 0x81, 0x36, 0xdf, 0x75, 0x26, 0x21, 0xc2, 0x3d, 0x16, 0x45, + 0x38, 0xcf, 0x73, 0x10, 0xab, 0xec, 0xb7, 0xdc, 0x74, 0x3b, 0x94, 0x84, 0x87, 0x0b, 0xa7, 0x85, + 0x5f, 0xff, 0x06, 0x54, 0x1c, 0x4c, 0x28, 0x71, 0x8a, 0xb4, 0x39, 0x56, 0xb8, 0x50, 0x34, 0x4f, + 0x0e, 0xe0, 0xb1, 0x85, 0x05, 0x14, 0xfa, 0x7f, 0x90, 0x82, 0xda, 0x2b, 0x1d, 0xd3, 0x31, 0xf0, + 0x59, 0x70, 0xa0, 0xab, 0xfc, 0x3e, 0x1d, 0xb8, 0x8c, 0xf2, 0xea, 0x36, 0x14, 0x1c, 0xec, 0x4e, + 0xc7, 0x9c, 0xe9, 0x56, 0xb7, 0x5e, 0x4a, 0x56, 0x7a, 0x51, 0xe9, 0x74, 0x4c, 0x54, 0x61, 0xac, + 0xdc, 0x87, 0x02, 0x97, 0xa0, 0x32, 0x14, 0x8f, 0xf6, 0xee, 0xee, 0xed, 0xbf, 0xbf, 0x57, 0x4f, + 0x21, 0x80, 0xc2, 0x76, 0xab, 0xd5, 0x3e, 0xe8, 0xd6, 0xd3, 0x48, 0x82, 0xfc, 0x76, 0x73, 0x5f, + 0xed, 0xd6, 0x33, 0x54, 0xac, 
0xb6, 0xdf, 0x6d, 0xb7, 0xba, 0xf5, 0x2c, 0x5a, 0x85, 0x0a, 0xbf, + 0xd6, 0xee, 0xec, 0xab, 0xef, 0x6d, 0x77, 0xeb, 0xb9, 0x90, 0xe8, 0xb0, 0xbd, 0x77, 0xbb, 0xad, + 0xd6, 0xf3, 0xca, 0xcb, 0x94, 0x4a, 0xc7, 0x14, 0x6b, 0x01, 0x69, 0x4e, 0x87, 0x48, 0xb3, 0xf2, + 0xeb, 0x0c, 0xc8, 0xf1, 0x15, 0x18, 0x7a, 0x77, 0x66, 0xe2, 0x5b, 0x17, 0x28, 0xdf, 0x66, 0x66, + 0x8f, 0x9e, 0x86, 0xaa, 0x83, 0x8f, 0x31, 0xe9, 0x8f, 0x78, 0x45, 0xc8, 0x33, 0x66, 0x45, 0xad, + 0x08, 0x29, 0x33, 0x72, 0xb9, 0xda, 0x47, 0xb8, 0x4f, 0x34, 0x0e, 0x45, 0x7c, 0xd1, 0x49, 0x54, + 0x8d, 0x4a, 0x0f, 0xb9, 0x50, 0xf9, 0xf0, 0x42, 0xb1, 0x94, 0x20, 0xaf, 0xb6, 0xbb, 0xea, 0x0f, + 0xeb, 0x59, 0x84, 0xa0, 0xca, 0x2e, 0xb5, 0xc3, 0xbd, 0xed, 0x83, 0xc3, 0xce, 0x3e, 0x8d, 0xe5, + 0x25, 0xa8, 0x79, 0xb1, 0xf4, 0x84, 0x79, 0xe5, 0x00, 0xae, 0xc6, 0x94, 0x8f, 0x8f, 0xd8, 0x37, + 0x50, 0x7e, 0x93, 0x0e, 0xbb, 0x8c, 0x72, 0xf4, 0x77, 0x66, 0x22, 0xbd, 0x99, 0xb4, 0xe8, 0x9c, + 0x0d, 0xb3, 0x0c, 0x25, 0x2c, 0xda, 0x61, 0x2c, 0xc0, 0x2b, 0xaa, 0x3f, 0x56, 0x5e, 0x5a, 0x1e, + 0xb4, 0x60, 0xd5, 0x65, 0x94, 0x7f, 0xa7, 0xa1, 0x36, 0x03, 0x11, 0x68, 0x0b, 0xf2, 0x9c, 0x57, + 0xc5, 0xfd, 0x9a, 0x63, 0x08, 0x27, 0xf0, 0x84, 0xab, 0xa2, 0x37, 0x23, 0xaf, 0x34, 0x07, 0x45, + 0x3c, 0x58, 0x5e, 0x0f, 0x4f, 0x98, 0xfa, 0x16, 0xe8, 0x2d, 0x90, 0x7c, 0xac, 0x13, 0x64, 0xfe, + 0xa9, 0x79, 0x73, 0x1f, 0x25, 0x85, 0x7d, 0x60, 0x83, 0x5e, 0x0f, 0xca, 0xdd, 0xdc, 0x3c, 0x9b, + 0x13, 0xe6, 0x5c, 0x41, 0x18, 0x7b, 0xfa, 0x4a, 0x0b, 0xca, 0xa1, 0xf9, 0xa0, 0x27, 0x40, 0x9a, + 0xe8, 0xa7, 0xa2, 0xf3, 0xcb, 0x7b, 0x77, 0xa5, 0x89, 0x7e, 0xca, 0x9b, 0xbe, 0x57, 0xa1, 0x48, + 0x6f, 0x0e, 0x75, 0x57, 0x74, 0x83, 0x0a, 0x13, 0xfd, 0xf4, 0x1d, 0xdd, 0x55, 0x3e, 0x80, 0x6a, + 0xb4, 0xeb, 0x49, 0xf7, 0xa2, 0x63, 0x4d, 0xcd, 0x01, 0xf3, 0x91, 0x57, 0xf9, 0x00, 0xdd, 0x82, + 0xfc, 0x89, 0xc5, 0xe1, 0x7a, 0x31, 0x68, 0xdd, 0xb3, 0x08, 0x0e, 0x75, 0x4d, 0xb9, 0xb6, 0xf2, + 0x10, 0xf2, 0x0c, 0x7e, 0x29, 0x94, 0xb2, 0xfe, 0xa5, 0x28, 0xf5, 
0xe9, 0x35, 0xfa, 0x00, 0x40, + 0x27, 0xc4, 0x31, 0x7a, 0xd3, 0xc0, 0xf1, 0xfa, 0x62, 0xf8, 0xde, 0xf6, 0xf4, 0x9a, 0xd7, 0x04, + 0x8e, 0x5f, 0x0e, 0x4c, 0x43, 0x58, 0x1e, 0x72, 0xa8, 0xec, 0x41, 0x35, 0x6a, 0x1b, 0xfe, 0x93, + 0xb0, 0xb2, 0xe0, 0x4f, 0x82, 0x5f, 0x4e, 0xfa, 0xc5, 0x68, 0x96, 0xf7, 0xaa, 0xd9, 0x40, 0xf9, + 0x34, 0x0d, 0xa5, 0xee, 0xa9, 0x58, 0xa3, 0x31, 0x6d, 0xd2, 0xc0, 0x34, 0x13, 0x6e, 0x0a, 0xf2, + 0xbe, 0x6b, 0xd6, 0xef, 0xe6, 0xbe, 0xed, 0x6f, 0xa8, 0x5c, 0xd2, 0x2e, 0x82, 0xd7, 0xfe, 0x12, + 0x70, 0xfd, 0x06, 0x48, 0xfe, 0xaa, 0xa2, 0x9c, 0x49, 0x1f, 0x0c, 0x1c, 0xec, 0xba, 0x62, 0x6e, + 0xde, 0x90, 0x75, 0xdd, 0xad, 0x4f, 0x44, 0xdb, 0x31, 0xab, 0xf2, 0x81, 0x32, 0x80, 0xda, 0x4c, + 0xe2, 0x46, 0x6f, 0x40, 0xd1, 0x9e, 0xf6, 0x34, 0x2f, 0x3c, 0x33, 0x9b, 0xc7, 0xab, 0x9f, 0xa7, + 0xbd, 0xb1, 0xd1, 0xbf, 0x8b, 0xcf, 0xbc, 0x97, 0xb1, 0xa7, 0xbd, 0xbb, 0x3c, 0x8a, 0xfc, 0x29, + 0x99, 0xf0, 0x53, 0x4e, 0xa0, 0xe4, 0x2d, 0x0a, 0xf4, 0xbd, 0xf0, 0x3e, 0xf1, 0xfe, 0xc5, 0xc4, + 0x16, 0x13, 0xc2, 0x7d, 0x68, 0x9b, 0xdc, 0x84, 0x55, 0xd7, 0x18, 0x9a, 0x78, 0xa0, 0x05, 0xac, + 0x8d, 0x3d, 0xad, 0xa4, 0xd6, 0xf8, 0x8d, 0x5d, 0x8f, 0xb2, 0x29, 0xff, 0x4a, 0x43, 0xc9, 0xdb, + 0xb0, 0xe8, 0xe5, 0xd0, 0xba, 0xab, 0x2e, 0xe8, 0x98, 0x79, 0x8a, 0x41, 0xe3, 0x3c, 0xfa, 0xae, + 0x99, 0x8b, 0xbf, 0x6b, 0xdc, 0x1f, 0x10, 0xaf, 0x7d, 0x9a, 0xbb, 0xf0, 0xaf, 0xa8, 0x17, 0x01, + 0x11, 0x8b, 0xe8, 0x63, 0xed, 0xc4, 0x22, 0x86, 0x39, 0xd4, 0x78, 0xb0, 0x79, 0x4d, 0x59, 0x67, + 0x77, 0xee, 0xb1, 0x1b, 0x07, 0x2c, 0xee, 0x3f, 0x4d, 0x43, 0xc9, 0xaf, 0x0e, 0x2e, 0xda, 0x07, + 0xbf, 0x02, 0x05, 0x91, 0x00, 0x79, 0x23, 0x5c, 0x8c, 0xfc, 0x5f, 0x32, 0xb9, 0xd0, 0x2f, 0x19, + 0x19, 0x4a, 0x13, 0x4c, 0x74, 0x96, 0x67, 0x38, 0x71, 0xf6, 0xc7, 0x37, 0x5f, 0x87, 0x72, 0xe8, + 0x97, 0x04, 0xdd, 0x79, 0x7b, 0xed, 0xf7, 0xeb, 0x29, 0xb9, 0xf8, 0xe9, 0xe7, 0xd7, 0xb3, 0x7b, + 0xf8, 0x13, 0xba, 0x66, 0xd5, 0x76, 0xab, 0xd3, 0x6e, 0xdd, 0xad, 0xa7, 0xe5, 0xf2, 0xa7, 0x9f, + 0x5f, 
0x2f, 0xaa, 0x98, 0x35, 0xda, 0x6e, 0x76, 0x60, 0x25, 0xfc, 0x55, 0xa2, 0xe9, 0x00, 0x41, + 0xf5, 0xf6, 0xd1, 0xc1, 0xee, 0x4e, 0x6b, 0xbb, 0xdb, 0xd6, 0xee, 0xed, 0x77, 0xdb, 0xf5, 0x34, + 0xba, 0x0a, 0x97, 0x76, 0x77, 0xde, 0xe9, 0x74, 0xb5, 0xd6, 0xee, 0x4e, 0x7b, 0xaf, 0xab, 0x6d, + 0x77, 0xbb, 0xdb, 0xad, 0xbb, 0xf5, 0xcc, 0xd6, 0x9f, 0xca, 0x50, 0xdb, 0x6e, 0xb6, 0x76, 0x68, + 0xfe, 0x37, 0xfa, 0xba, 0x68, 0x64, 0xe6, 0x58, 0xdf, 0xe2, 0xdc, 0x73, 0x1b, 0xf2, 0xf9, 0x7d, + 0x5c, 0x74, 0x07, 0xf2, 0xac, 0xa5, 0x81, 0xce, 0x3f, 0xc8, 0x21, 0x2f, 0x69, 0xec, 0xd2, 0x97, + 0x61, 0xdb, 0xe3, 0xdc, 0x93, 0x1d, 0xf2, 0xf9, 0x7d, 0x5e, 0xa4, 0x82, 0x14, 0xf4, 0x24, 0x96, + 0x9f, 0xf4, 0x90, 0x13, 0xf4, 0x7e, 0xa9, 0xcf, 0x80, 0x18, 0x2d, 0x3f, 0xf9, 0x20, 0x27, 0x00, + 0x30, 0xb4, 0x0b, 0x45, 0x8f, 0xcb, 0x2e, 0x3b, 0x8b, 0x21, 0x2f, 0xed, 0xcb, 0xd2, 0x4f, 0xc0, + 0x7b, 0x0e, 0xe7, 0x1f, 0x2c, 0x91, 0x97, 0x34, 0x99, 0xd1, 0x0e, 0x14, 0x44, 0xb5, 0xbf, 0xe4, + 0x7c, 0x85, 0xbc, 0xac, 0xcf, 0x4a, 0x83, 0x16, 0x34, 0x73, 0x96, 0x1f, 0x97, 0x91, 0x13, 0xf4, + 0xcf, 0xd1, 0x11, 0x40, 0xa8, 0xc3, 0x90, 0xe0, 0x1c, 0x8c, 0x9c, 0xa4, 0x2f, 0x8e, 0xf6, 0xa1, + 0xe4, 0x13, 0xbe, 0xa5, 0xa7, 0x52, 0xe4, 0xe5, 0x0d, 0x6a, 0x74, 0x1f, 0x2a, 0x51, 0xa6, 0x93, + 0xec, 0xac, 0x89, 0x9c, 0xb0, 0xf3, 0x4c, 0xfd, 0x47, 0x69, 0x4f, 0xb2, 0xb3, 0x27, 0x72, 0xc2, + 0x46, 0x34, 0xfa, 0x08, 0x56, 0xe7, 0x69, 0x49, 0xf2, 0xa3, 0x28, 0xf2, 0x05, 0x5a, 0xd3, 0x68, + 0x02, 0x68, 0x01, 0x9d, 0xb9, 0xc0, 0xc9, 0x14, 0xf9, 0x22, 0x9d, 0x6a, 0x34, 0x80, 0xda, 0x2c, + 0x47, 0x48, 0x7a, 0x52, 0x45, 0x4e, 0xdc, 0xb5, 0xe6, 0x4f, 0x89, 0xd2, 0x86, 0xa4, 0x27, 0x57, + 0xe4, 0xc4, 0x4d, 0xec, 0x66, 0xfb, 0x8b, 0xaf, 0xd7, 0xd2, 0x5f, 0x7e, 0xbd, 0x96, 0xfe, 0xdb, + 0xd7, 0x6b, 0xe9, 0xcf, 0xbe, 0x59, 0x4b, 0x7d, 0xf9, 0xcd, 0x5a, 0xea, 0xcf, 0xdf, 0xac, 0xa5, + 0x7e, 0xf4, 0xc2, 0xd0, 0x20, 0xa3, 0x69, 0x6f, 0xa3, 0x6f, 0x4d, 0x36, 0xc3, 0x47, 0x00, 0x17, + 0x1d, 0x4b, 0xec, 0x15, 0x58, 0xd2, 0x7d, 
0xe5, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x48, 0xb2, + 0x05, 0xf3, 0xb6, 0x28, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -5357,6 +5388,26 @@ func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + n23, err23 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err23 != nil { + return 0, err23 + } + i -= n23 + i = encodeVarintTypes(dAtA, i, uint64(n23)) + i-- + dAtA[i] = 0x2a + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x20 + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x1a + } if m.BlockDataSize != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.BlockDataSize)) i-- @@ -6597,20 +6648,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA48 := make([]byte, len(m.RefetchChunks)*10) - var j47 int + dAtA49 := make([]byte, len(m.RefetchChunks)*10) + var j48 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA48[j47] = uint8(uint64(num)&0x7f | 0x80) + dAtA49[j48] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j47++ + j48++ } - dAtA48[j47] = uint8(num) - j47++ + dAtA49[j48] = uint8(num) + j48++ } - i -= j47 - copy(dAtA[i:], dAtA48[:j47]) - i = encodeVarintTypes(dAtA, i, uint64(j47)) + i -= j48 + copy(dAtA[i:], dAtA49[:j48]) + i = encodeVarintTypes(dAtA, i, uint64(j48)) i-- dAtA[i] = 0x12 } @@ -7122,12 +7173,12 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n57, err57 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err57 != nil { - return 0, err57 + n58, err58 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err58 != nil { + return 0, err58 } - i -= n57 - i = encodeVarintTypes(dAtA, i, uint64(n57)) + i -= n58 + i = encodeVarintTypes(dAtA, i, uint64(n58)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -7695,6 +7746,15 @@ func (m *RequestPrepareProposal) Size() (n int) { if m.BlockDataSize != 0 { n += 1 + sovTypes(uint64(m.BlockDataSize)) } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) return n } @@ -10937,6 +10997,90 @@ func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/cmd/cometbft/commands/debug/kill.go b/cmd/cometbft/commands/debug/kill.go index 8e51e28993..8964bb9a13 100644 --- a/cmd/cometbft/commands/debug/kill.go +++ b/cmd/cometbft/commands/debug/kill.go @@ -33,7 +33,7 @@ $ cometbft debug 34255 /path/to/tm-debug.zip`, } func killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) + pid, err := strconv.Atoi(args[0]) if err != nil { return err } @@ -100,7 +100,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. -func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the CometBFT process to a file // // NOTE: This will only work on UNIX systems. @@ -123,7 +123,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the CometBFT process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. 
- p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill CometBFT process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { diff --git a/cmd/cometbft/commands/root_test.go b/cmd/cometbft/commands/root_test.go index 72604240ce..4c22b56516 100644 --- a/cmd/cometbft/commands/root_test.go +++ b/cmd/cometbft/commands/root_test.go @@ -17,28 +17,12 @@ import ( cmtos "github.com/tendermint/tendermint/libs/os" ) -var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") - // clearConfig clears env vars, the given root dir, and resets viper. -func clearConfig(dir string) { - if err := os.Unsetenv("CMTHOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("CMT_HOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("TMHOME"); err != nil { - //XXX: Deprecated. - panic(err) - } - if err := os.Unsetenv("TM_HOME"); err != nil { - //XXX: Deprecated. - panic(err) - } +func clearConfig(t *testing.T, dir string) { + os.Clearenv() + err := os.RemoveAll(dir) + require.NoError(t, err) - if err := os.RemoveAll(dir); err != nil { - panic(err) - } viper.Reset() config = cfg.DefaultConfig() } @@ -56,11 +40,11 @@ func testRootCmd() *cobra.Command { return rootCmd } -func testSetup(rootDir string, args []string, env map[string]string) error { - clearConfig(defaultRoot) +func testSetup(t *testing.T, root string, args []string, env map[string]string) error { + clearConfig(t, root) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env args = append([]string{rootCmd.Use}, args...) 
@@ -68,22 +52,27 @@ func testSetup(rootDir string, args []string, env map[string]string) error { } func TestRootHome(t *testing.T) { - newRoot := filepath.Join(defaultRoot, "something-else") + tmpDir := os.TempDir() + root := filepath.Join(tmpDir, "adir") + newRoot := filepath.Join(tmpDir, "something-else") + defer clearConfig(t, root) + defer clearConfig(t, newRoot) + cases := []struct { args []string env map[string]string root string }{ - {nil, nil, defaultRoot}, + {nil, nil, root}, {[]string{"--home", newRoot}, nil, newRoot}, {nil, map[string]string{"TMHOME": newRoot}, newRoot}, //XXX: Deprecated. {nil, map[string]string{"CMTHOME": newRoot}, newRoot}, } for i, tc := range cases { - idxString := strconv.Itoa(i) + idxString := "idx: " + strconv.Itoa(i) - err := testSetup(defaultRoot, tc.args, tc.env) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.root, config.RootDir, idxString) @@ -115,8 +104,10 @@ func TestRootFlagsEnv(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - - err := testSetup(defaultRoot, tc.args, tc.env) + root := filepath.Join(os.TempDir(), "adir2_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.logLevel, config.LogLevel, idxString) @@ -144,10 +135,11 @@ func TestRootConfig(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - clearConfig(defaultRoot) - + root := filepath.Join(os.TempDir(), "adir3_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) // XXX: path must match cfg.defaultConfigPath - configFilePath := filepath.Join(defaultRoot, "config") + configFilePath := filepath.Join(root, "config") err := cmtos.EnsureDir(configFilePath, 0o700) require.Nil(t, err) @@ -157,7 +149,7 @@ func TestRootConfig(t *testing.T) { require.Nil(t, err) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", 
defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env tc.args = append([]string{rootCmd.Use}, tc.args...) diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go index 1d79e21b57..58b40b2bed 100644 --- a/cmd/cometbft/commands/run_node.go +++ b/cmd/cometbft/commands/run_node.go @@ -107,6 +107,17 @@ func AddNodeFlags(cmd *cobra.Command) { trace.FlagInfluxDBTokenDescription, ) + cmd.PersistentFlags().String( + trace.FlagPyroscopeURL, + config.Instrumentation.PyroscopeURL, + trace.FlagPyroscopeURLDescription, + ) + + cmd.PersistentFlags().Bool( + trace.FlagPyroscopeTrace, + config.Instrumentation.PyroscopeTrace, + trace.FlagPyroscopeTraceDescription, + ) } // NewRunNodeCmd returns the command that allows the CLI to start a node. diff --git a/config/config.go b/config/config.go index 131d205070..15e7f35925 100644 --- a/config/config.go +++ b/config/config.go @@ -61,6 +61,12 @@ var ( minSubscriptionBufferSize = 100 defaultSubscriptionBufferSize = 200 + + // DefaultInfluxTables is a list of tables that are used for storing traces. + // This global var is filled by an init function in the schema package. This + // allows for the schema package to contain all the relevant logic while + // avoiding import cycles. + DefaultInfluxTables = []string{} ) // Config defines the top level configuration for a CometBFT node @@ -944,10 +950,11 @@ type ConsensusConfig struct { TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` // How much the timeout_precommit increases with each round TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"` - // TargetHeigtDuration is used to determine how long we wait after a - // block is committed. If this time is shorter than the actual time to reach - // consensus for that height, then we do not wait at all. 
- TargetHeightDuration time.Duration `mapstructure:"target_height_duration"` + // How long we wait after committing a block, before starting on the new + // height (this gives us a chance to receive some more precommits, even + // though we already have +2/3). + // NOTE: when modifying, make sure to update time_iota_ms genesis parameter + TimeoutCommit time.Duration `mapstructure:"timeout_commit"` // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` @@ -967,13 +974,13 @@ type ConsensusConfig struct { func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), - TimeoutPropose: 2000 * time.Millisecond, + TimeoutPropose: 3000 * time.Millisecond, TimeoutProposeDelta: 500 * time.Millisecond, TimeoutPrevote: 1000 * time.Millisecond, TimeoutPrevoteDelta: 500 * time.Millisecond, TimeoutPrecommit: 1000 * time.Millisecond, TimeoutPrecommitDelta: 500 * time.Millisecond, - TargetHeightDuration: 3000 * time.Millisecond, + TimeoutCommit: 1000 * time.Millisecond, SkipTimeoutCommit: false, CreateEmptyBlocks: true, CreateEmptyBlocksInterval: 0 * time.Second, @@ -993,7 +1000,7 @@ func TestConsensusConfig() *ConsensusConfig { cfg.TimeoutPrecommit = 10 * time.Millisecond cfg.TimeoutPrecommitDelta = 1 * time.Millisecond // NOTE: when modifying, make sure to update time_iota_ms (testGenesisFmt) in toml.go - cfg.TargetHeightDuration = 70 * time.Millisecond + cfg.TimeoutCommit = 10 * time.Millisecond cfg.SkipTimeoutCommit = true cfg.PeerGossipSleepDuration = 5 * time.Millisecond cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond @@ -1027,14 +1034,10 @@ func (cfg *ConsensusConfig) Precommit(round int32) time.Duration { ) * time.Nanosecond } -// NextStartTime adds the TargetHeightDuration to the provided starting time. 
-func (cfg *ConsensusConfig) NextStartTime(t time.Time) time.Time { - newStartTime := t.Add(cfg.TargetHeightDuration) - now := time.Now() - if newStartTime.Before(now) { - return now - } - return newStartTime +// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits +// for a single block (ie. a commit). +func (cfg *ConsensusConfig) Commit(t time.Time) time.Time { + return t.Add(cfg.TimeoutCommit) } // WalFile returns the full path to the write-ahead log file @@ -1071,8 +1074,8 @@ func (cfg *ConsensusConfig) ValidateBasic() error { if cfg.TimeoutPrecommitDelta < 0 { return errors.New("timeout_precommit_delta can't be negative") } - if cfg.TargetHeightDuration < 0 { - return errors.New("target_height_duration can't be negative") + if cfg.TimeoutCommit < 0 { + return errors.New("timeout_commit can't be negative") } if cfg.CreateEmptyBlocksInterval < 0 { return errors.New("create_empty_blocks_interval can't be negative") @@ -1193,6 +1196,23 @@ type InstrumentationConfig struct { // InfluxBatchSize is the number of points to write in a single batch. InfluxBatchSize int `mapstructure:"influx_batch_size"` + + // InfluxTables is the list of tables that will be traced. See the + // pkg/trace/schema for a complete list of tables. + InfluxTables []string `mapstructure:"influx_tables"` + + // PyroscopeURL is the pyroscope url used to establish a connection with a + // pyroscope continuous profiling server. + PyroscopeURL string `mapstructure:"pyroscope_url"` + + // PyroscopeProfile is a flag that enables tracing with pyroscope. + PyroscopeTrace bool `mapstructure:"pyroscope_trace"` + + // PyroscopeProfileTypes is a list of profile types to be traced with + // pyroscope. Available profile types are: cpu, alloc_objects, alloc_space, + // inuse_objects, inuse_space, goroutines, mutex_count, mutex_duration, + // block_count, block_duration. 
+ PyroscopeProfileTypes []string `mapstructure:"pyroscope_profile_types"` } // DefaultInstrumentationConfig returns a default configuration for metrics @@ -1207,6 +1227,19 @@ func DefaultInstrumentationConfig() *InstrumentationConfig { InfluxOrg: "celestia", InfluxBucket: "e2e", InfluxBatchSize: 20, + InfluxTables: DefaultInfluxTables, + PyroscopeURL: "", + PyroscopeTrace: false, + PyroscopeProfileTypes: []string{ + "cpu", + "alloc_objects", + "inuse_objects", + "goroutines", + "mutex_count", + "mutex_duration", + "block_count", + "block_duration", + }, } } @@ -1222,6 +1255,9 @@ func (cfg *InstrumentationConfig) ValidateBasic() error { if cfg.MaxOpenConnections < 0 { return errors.New("max_open_connections can't be negative") } + if cfg.PyroscopeTrace && cfg.PyroscopeURL == "" { + return errors.New("pyroscope_trace can't be enabled if profiling is disabled") + } // if there is not InfluxURL configured, then we do not need to validate the rest // of the config because we are not connecting. 
if cfg.InfluxURL == "" { diff --git a/config/config_test.go b/config/config_test.go index ab3212b509..43f0a46c49 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -157,8 +157,8 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) { "TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true}, "TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false}, "TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true}, - "TargetHeightDuration": {func(c *ConsensusConfig) { c.TargetHeightDuration = time.Second }, false}, - "TargetHeightDuration negative": {func(c *ConsensusConfig) { c.TargetHeightDuration = -1 }, true}, + "TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false}, + "TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true}, "PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false}, "PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true}, "PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false}, diff --git a/config/toml.go b/config/toml.go index 7c50608903..3a7d8492fd 100644 --- a/config/toml.go +++ b/config/toml.go @@ -464,10 +464,10 @@ timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}" timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}" # How much the timeout_precommit increases with each round timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}" -# TargetHeigtDuration is used to determine how long we wait after a -# block is committed. If this time is shorter than the actual time to reach -# consensus for that height, then we do not wait at all. 
-target_height_duration = "{{ .Consensus.TargetHeightDuration }}" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "{{ .Consensus.TimeoutCommit }}" # How many blocks to look back to check existence of the node's consensus votes before joining consensus # When non-zero, the node will panic upon restart @@ -556,6 +556,25 @@ influx_org = "{{ .Instrumentation.InfluxOrg }}" # The size of the batches that are sent to the database. influx_batch_size = {{ .Instrumentation.InfluxBatchSize }} + +# The list of tables that are updated when tracing. All available tables and +# their schema can be found in the pkg/trace/schema package. +influx_tables = [{{ range .Instrumentation.InfluxTables }}{{ printf "%q, " . }}{{end}}] + +# The URL of the pyroscope instance to use for continuous profiling. +# If empty, continuous profiling is disabled. +pyroscope_url = "{{ .Instrumentation.PyroscopeURL }}" + +# When true, tracing data is added to the continuous profiling +# performed by pyroscope. +pyroscope_trace = {{ .Instrumentation.PyroscopeTrace }} + +# pyroscope_profile_types is a list of profile types to be traced with +# pyroscope. Available profile types are: cpu, alloc_objects, alloc_space, +# inuse_objects, inuse_space, goroutines, mutex_count, mutex_duration, +# block_count, block_duration. +pyroscope_profile_types = [{{ range .Instrumentation.PyroscopeProfileTypes }}{{ printf "%q, " . 
}}{{end}}] + ` /****** these are for test settings ***********/ diff --git a/consensus/reactor.go b/consensus/reactor.go index aa93f5ac9d..f0c1937c45 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -15,6 +15,8 @@ import ( "github.com/tendermint/tendermint/libs/log" cmtsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/pkg/trace/schema" cmtcons "github.com/tendermint/tendermint/proto/tendermint/consensus" cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" @@ -47,7 +49,8 @@ type Reactor struct { eventBus *types.EventBus rs *cstypes.RoundState - Metrics *Metrics + Metrics *Metrics + traceClient *trace.Client } type ReactorOption func(*Reactor) @@ -56,10 +59,11 @@ type ReactorOption func(*Reactor) // consensusState. func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { conR := &Reactor{ - conS: consensusState, - waitSync: waitSync, - rs: consensusState.GetRoundState(), - Metrics: NopMetrics(), + conS: consensusState, + waitSync: waitSync, + rs: consensusState.GetRoundState(), + Metrics: NopMetrics(), + traceClient: &trace.Client{}, } conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) @@ -334,6 +338,7 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1) + schema.WriteBlockPart(conR.traceClient, msg.Height, msg.Round, e.Src.ID(), msg.Part.Index, schema.TransferTypeDownload) conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) @@ -590,6 +595,7 @@ OUTER_LOOP: Part: *parts, }, }, logger) { + schema.WriteBlockPart(conR.traceClient, rs.Height, rs.Round, peer.ID(), part.Index, 
schema.TransferTypeUpload) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } continue OUTER_LOOP @@ -1021,6 +1027,10 @@ func ReactorMetrics(metrics *Metrics) ReactorOption { return func(conR *Reactor) { conR.Metrics = metrics } } +func ReactorTracing(traceClient *trace.Client) ReactorOption { + return func(conR *Reactor) { conR.traceClient = traceClient } +} + //----------------------------------------------------------------------------- var ( @@ -1089,7 +1099,8 @@ func (ps *PeerState) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return cmtjson.Marshal(ps) + type jsonPeerState PeerState + return cmtjson.Marshal((*jsonPeerState)(ps)) } // GetHeight returns an atomic snapshot of the PeerRoundState's height diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index b6134c7ce0..a82aa38859 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -2,6 +2,7 @@ package consensus import ( "context" + "encoding/json" "fmt" "os" "path" @@ -1043,3 +1044,32 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { }) } } + +func TestMarshalJSONPeerState(t *testing.T) { + ps := NewPeerState(nil) + data, err := json.Marshal(ps) + require.NoError(t, err) + require.JSONEq(t, `{ + "round_state":{ + "height": "0", + "round": -1, + "step": 0, + "start_time": "0001-01-01T00:00:00Z", + "proposal": false, + "proposal_block_part_set_header": + {"total":0, "hash":""}, + "proposal_block_parts": null, + "proposal_pol_round": -1, + "proposal_pol": null, + "prevotes": null, + "precommits": null, + "last_commit_round": -1, + "last_commit": null, + "catchup_commit_round": -1, + "catchup_commit": null + }, + "stats":{ + "votes":"0", + "block_parts":"0"} + }`, string(data)) +} diff --git a/consensus/replay.go b/consensus/replay.go index 88b2f5f874..586ddebf80 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "fmt" "hash/crc32" "io" @@ -238,17 +239,22 @@ func 
(h *Handshaker) NBlocks() int { } // TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { +func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (string, error) { + return h.HandshakeWithContext(context.TODO(), proxyApp) +} + +// HandshakeWithContext is cancellable version of Handshake +func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) (string, error) { // Handshake is done via ABCI Info on the query conn. res, err := proxyApp.Query().InfoSync(proxy.RequestInfo) if err != nil { - return fmt.Errorf("error calling Info: %v", err) + return "", fmt.Errorf("error calling Info: %v", err) } blockHeight := res.LastBlockHeight if blockHeight < 0 { - return fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) + return "", fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) } appHash := res.LastBlockAppHash @@ -265,9 +271,9 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } // Replay blocks up to the latest in the blockstore. 
- _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + appHash, err = h.ReplayBlocksWithContext(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { - return fmt.Errorf("error on replay: %v", err) + return "", fmt.Errorf("error on replay: %v", err) } h.logger.Info("Completed ABCI Handshake - CometBFT and App are synced", @@ -275,7 +281,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // TODO: (on restart) replay mempool - return nil + return res.Version, nil } // ReplayBlocks replays all blocks since appBlockHeight and ensures the result @@ -286,6 +292,17 @@ func (h *Handshaker) ReplayBlocks( appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns, +) ([]byte, error) { + return h.ReplayBlocksWithContext(context.TODO(), state, appHash, appBlockHeight, proxyApp) +} + +// ReplayBlocksWithContext is cancellable version of ReplayBlocks. +func (h *Handshaker) ReplayBlocksWithContext( + ctx context.Context, + state sm.State, + appHash []byte, + appBlockHeight int64, + proxyApp proxy.AppConns, ) ([]byte, error) { storeBlockBase := h.store.Base() storeBlockHeight := h.store.Height() @@ -390,7 +407,7 @@ func (h *Handshaker) ReplayBlocks( // Either the app is asking for replay, or we're all synced up. if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! 
@@ -405,7 +422,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true) case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), @@ -435,6 +452,7 @@ func (h *Handshaker) ReplayBlocks( } func (h *Handshaker) replayBlocks( + ctx context.Context, state sm.State, proxyApp proxy.AppConns, appBlockHeight, @@ -461,6 +479,12 @@ func (h *Handshaker) replayBlocks( firstBlock = state.InitialHeight } for i := firstBlock; i <= finalBlock; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) // Extra check to ensure the app was not changed in a way it shouldn't have. 
diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 03096fd74e..26cedffe52 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -324,7 +324,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) handshaker.SetEventBus(eventBus) - err = handshaker.Handshake(proxyApp) + _, err = handshaker.Handshake(proxyApp) if err != nil { cmtos.Exit(fmt.Sprintf("Error on handshake: %v", err)) } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index a1b703f8d6..93dc33d458 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -32,6 +32,7 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" ) func TestMain(m *testing.M) { @@ -754,13 +755,14 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin } }) - err := handshaker.Handshake(proxyApp) + softwareVersion, err := handshaker.Handshake(proxyApp) if expectError { require.Error(t, err) return } else if err != nil { t.Fatalf("Error on abci handshake: %v", err) } + require.Equal(t, softwareVersion, version.ABCISemVer) // get the latest app hash from the app res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""}) @@ -932,7 +934,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + _, err = h.Handshake(proxyApp) + if err != nil { t.Log(err) } }) @@ -956,7 +959,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + _, err = h.Handshake(proxyApp) + if err != nil { t.Log(err) } }) @@ -1271,9 +1275,12 @@ func 
TestHandshakeUpdatesValidators(t *testing.T) { t.Error(err) } }) - if err := handshaker.Handshake(proxyApp); err != nil { + version, err := handshaker.Handshake(proxyApp) + if err != nil { t.Fatalf("Error on abci handshake: %v", err) } + require.Equal(t, customVersion, version) + // reload the state, check the validator set was updated state, err = stateStore.Load() require.NoError(t, err) @@ -1284,6 +1291,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) { assert.Equal(t, newValAddr, expectValAddr) } +const customVersion = "v1.0.0" + // returns the vals on InitChain type initChainApp struct { abci.BaseApplication @@ -1295,3 +1304,9 @@ func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitC Validators: ica.vals, } } + +func (ica *initChainApp) Info(req abci.RequestInfo) abci.ResponseInfo { + return abci.ResponseInfo{ + Version: customVersion, + } +} diff --git a/consensus/state.go b/consensus/state.go index b9e90e2103..bbbb1d3d44 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -25,6 +25,7 @@ import ( cmtsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/pkg/trace/schema" cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -142,7 +143,7 @@ type State struct { // for reporting metrics metrics *Metrics - eventCollector *trace.Client + traceClient *trace.Client } // StateOption sets an optional parameter on the State. 
@@ -173,7 +174,7 @@ func NewState( evpool: evpool, evsw: cmtevents.NewEventSwitch(), metrics: NopMetrics(), - eventCollector: &trace.Client{}, + traceClient: &trace.Client{}, } // set function defaults (may be overwritten before calling Start) @@ -215,9 +216,9 @@ func StateMetrics(metrics *Metrics) StateOption { return func(cs *State) { cs.metrics = metrics } } -// SetEventCollector sets the remote event collector. -func SetEventCollector(ec *trace.Client) StateOption { - return func(cs *State) { cs.eventCollector = ec } +// SetTraceClient sets the remote event collector. +func SetTraceClient(ec *trace.Client) StateOption { + return func(cs *State) { cs.traceClient = ec } } // String returns a string. @@ -664,11 +665,14 @@ func (cs *State) updateToState(state sm.State) { cs.updateRoundStep(0, cstypes.RoundStepNewHeight) if cs.CommitTime.IsZero() { - // If it is the first block, start time is equal to - // states last block time which is the genesis time. - cs.StartTime = state.LastBlockTime + // "Now" makes it easier to sync up dev nodes. + // We add timeoutCommit to allow transactions + // to be gathered for the first block. + // And alternative solution that relies on clocks: + // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) + cs.StartTime = cs.config.Commit(cmttime.Now()) } else { - cs.StartTime = cs.config.NextStartTime(cs.StartTime) + cs.StartTime = cs.config.Commit(cs.CommitTime) } cs.Validators = validators @@ -700,6 +704,8 @@ func (cs *State) newStep() { cs.nSteps++ + schema.WriteRoundState(cs.traceClient, cs.Height, cs.Round, cs.Step) + // newStep is called by updateToState in NewState before the eventBus is set! 
if cs.eventBus != nil { if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil { @@ -1002,7 +1008,7 @@ func (cs *State) enterNewRound(height int64, round int32) { logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) } - logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + prevHeight, prevRound, prevStep := cs.Height, cs.Round, cs.Step // increment validators if necessary validators := cs.Validators @@ -1016,17 +1022,23 @@ func (cs *State) enterNewRound(height int64, round int32) { // but we fire an event, so update the round step first cs.updateRoundStep(round, cstypes.RoundStepNewRound) cs.Validators = validators + propAddress := validators.GetProposer().PubKey.Address() if round == 0 { // We've already reset these upon new height, // and meanwhile we might have received a proposal // for round 0. } else { - logger.Debug("resetting proposal info") + logger.Info("resetting proposal info", "proposer", propAddress) cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil } + logger.Debug("entering new round", + "previous", log.NewLazySprintf("%v/%v/%v", prevHeight, prevRound, prevStep), + "proposer", propAddress, + ) + cs.Votes.SetRound(cmtmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping cs.TriggeredTimeoutPrecommit = false @@ -1824,9 +1836,14 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { } } + blockSize := block.Size() + + // trace some metadata about the block + schema.WriteBlock(cs.traceClient, block, blockSize) + cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs))) - cs.metrics.BlockSizeBytes.Set(float64(block.Size())) + cs.metrics.BlockSizeBytes.Set(float64(blockSize)) cs.metrics.CommittedHeight.Set(float64(block.Height)) } @@ -1852,7 +1869,8 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { p 
:= proposal.ToProto() // Verify signature - if !cs.Validators.GetProposer().PubKey.VerifySignature( + pubKey := cs.Validators.GetProposer().PubKey + if !pubKey.VerifySignature( types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature, ) { return ErrInvalidProposalSignature @@ -1867,7 +1885,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } - cs.Logger.Info("received proposal", "proposal", proposal) + cs.Logger.Info("received proposal", "proposal", proposal, "proposer", pubKey.Address()) return nil } diff --git a/consensus/state_test.go b/consensus/state_test.go index 3110f67d50..5e3a76c545 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "strings" "testing" "time" @@ -240,61 +241,83 @@ func TestStateBadProposal(t *testing.T) { } func TestStateOversizedBlock(t *testing.T) { - cs1, vss := randState(2) - cs1.state.ConsensusParams.Block.MaxBytes = 2000 - height, round := cs1.Height, cs1.Round - vs2 := vss[1] - - partSize := types.BlockPartSizeBytes - - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - - propBlock, _ := cs1.createProposalBlock() - propBlock.Data.Txs = []types.Tx{cmtrand.Bytes(2001)} - propBlock.Header.DataHash = propBlock.Data.Hash() - - // make the second validator the proposer by incrementing round - round++ - incrementRound(vss[1:]...) 
- - propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, round, -1, blockID) - p := proposal.ToProto() - if err := vs2.SignProposal(config.ChainID(), p); err != nil { - t.Fatal("failed to sign bad proposal", err) + const maxBytes = 2000 + + for _, testCase := range []struct { + name string + oversized bool + }{ + { + name: "max size, correct block", + oversized: false, + }, + { + name: "off-by-1 max size, incorrect block", + oversized: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + cs1, vss := randState(2) + cs1.state.ConsensusParams.Block.MaxBytes = maxBytes + height, round := cs1.Height, cs1.Round + vs2 := vss[1] + + partSize := types.BlockPartSizeBytes + + propBlock, propBlockParts := findBlockSizeLimit(t, height, maxBytes, cs1, partSize, testCase.oversized) + + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + + // make the second validator the proposer by incrementing round + round++ + incrementRound(vss[1:]...) 
+ + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal := types.NewProposal(height, round, -1, blockID) + p := proposal.ToProto() + if err := vs2.SignProposal(config.ChainID(), p); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + proposal.Signature = p.Signature + + totalBytes := 0 + for i := 0; i < int(propBlockParts.Total()); i++ { + part := propBlockParts.GetPart(i) + totalBytes += len(part.Bytes) + } + + if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + + // start the machine + startTestRound(cs1, height, round) + + t.Log("Block Sizes;", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) + + validateHash := propBlock.Hash() + lockedRound := int32(1) + if testCase.oversized { + validateHash = nil + lockedRound = -1 + // if the block is oversized cs1 should log an error with the block part message as it exceeds + // the consensus params. The block is not added to cs.ProposalBlock so the node timeouts. 
+ ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + // and then should send nil prevote and precommit regardless of whether other validators prevote and + // precommit on it + } + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], validateHash) + + signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevote(voteCh, height, round) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, lockedRound, vss[0], validateHash, validateHash) + + signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + }) } - proposal.Signature = p.Signature - - totalBytes := 0 - for i := 0; i < int(propBlockParts.Total()); i++ { - part := propBlockParts.GetPart(i) - totalBytes += len(part.Bytes) - } - - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } - - // start the machine - startTestRound(cs1, height, round) - - t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) - - // c1 should log an error with the block part message as it exceeds the consensus params. The - // block is not added to cs.ProposalBlock so the node timeouts. 
- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) - - // and then should send nil prevote and precommit regardless of whether other validators prevote and - // precommit on it - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } //---------------------------------------------------------------------------------------------------- @@ -1914,3 +1937,31 @@ func subscribeUnBuffered(eventBus *types.EventBus, q cmtpubsub.Query) <-chan cmt } return sub.Out() } + +func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSize uint32, oversized bool) (*types.Block, *types.PartSet) { + var offset int64 + if !oversized { + offset = -2 + } + softMaxDataBytes := int(types.MaxDataBytes(maxBytes, 0, 0)) + for i := softMaxDataBytes; i < softMaxDataBytes*2; i++ { + propBlock, propBlockParts := cs.state.MakeBlock( + height, + types.Data{Txs: []types.Tx{[]byte("a=" + strings.Repeat("o", i-2))}}, + &types.Commit{}, + nil, + cs.privValidatorPubKey.Address(), + ) + + if propBlockParts.ByteSize() > maxBytes+offset { + s := "real max" + if oversized { + s = "off-by-1" + } + t.Log("Detected "+s+" data size for block;", "size", i, "softMaxDataBytes", softMaxDataBytes) + return propBlock, propBlockParts + } + } + require.Fail(t, "We shouldn't hit the end of the loop") + return nil, nil +} diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index caf789caf4..4d64e8ae08 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -267,3 +267,14 @@ is ignored and the data is 
retrieved as if `match_events=false`. Additionally, if a node that was running Tendermint Core when the data was first indexed, and switched to CometBFT, is queried, it will retrieve this previously indexed data as if `match_events=false` (attributes can match the query conditions across different events on the same height). + + +# Event attribute value types + +Users can use anything as an event value. However, if the event attribute value is a number, the following restrictions apply: + +- Negative numbers will not be properly retrieved when querying the indexer +- When querying the events using `tx_search` and `block_search`, the value given as part of the condition cannot be a float. +- Any event value retrieved from the database will be represented as a `BigInt` (from `math/big`) +- Floating point values are not read from the database even with the introduction of `BigInt`. This was intentionally done +to keep the same behaviour as was historically present and not introduce breaking changes. This will be fixed in the 0.38 series. diff --git a/docs/core/configuration.md b/docs/core/configuration.md index 3c2cffdbe1..dab0c97466 100644 --- a/docs/core/configuration.md +++ b/docs/core/configuration.md @@ -414,10 +414,10 @@ timeout_prevote_delta = "500ms" timeout_precommit = "1s" # How much the timeout_precommit increases with each round timeout_precommit_delta = "500ms" -# TargetHeigtDuration is used to determine how long we wait after a -# block is committed. If this time is shorter than the actual time to reach -# consensus for that height, then we do not wait at all. -target_height_duration = "15s" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). 
+timeout_commit = "1s" # How many blocks to look back to check existence of the node's consensus votes before joining consensus # When non-zero, the node will panic upon restart @@ -537,9 +537,12 @@ timeout_prevote = "1s" timeout_prevote_delta = "500ms" timeout_precommit = "1s" timeout_precommit_delta = "500ms" -target_height_duration = "1s" +timeout_commit = "1s" ``` +Note that in a successful round, the only timeout that we absolutely wait no +matter what is `timeout_commit`. + Here's a brief summary of the timeouts: - `timeout_propose` = how long we wait for a proposal block before prevoting nil @@ -549,8 +552,7 @@ Here's a brief summary of the timeouts: - `timeout_prevote_delta` = how much the `timeout_prevote` increases with each round - `timeout_precommit` = how long we wait after receiving +2/3 precommits for anything (ie. not a single block or nil) -- `timeout_precommit_delta` = how much the timeout_precommit increases with - each round -- `target_height_duration` = used to determine how long we wait after a - block is committed. If this time is shorter than the actual time to reach - consensus for that height, then we do not wait at all. +- `timeout_precommit_delta` = how much the `timeout_precommit` increases with each round +- `timeout_commit` = how long we wait after committing a block, before starting + on the new height (this gives us a chance to receive some more precommits, + even though we already have +2/3) diff --git a/docs/core/subscription.md b/docs/core/subscription.md index 3a5d60cd16..796a415ff1 100644 --- a/docs/core/subscription.md +++ b/docs/core/subscription.md @@ -40,6 +40,20 @@ You can also use tags, given you had included them into DeliverTx response, to query transaction results. See [Indexing transactions](./indexing-transactions.md) for details. 
+ +## Query parameter and event type restrictions + +While CometBFT imposes no restrictions on the application with regards to the type of +the event output, there are several restrictions when it comes to querying +events whose attribute values are numeric. + +- Queries cannot include negative numbers +- If floating points are compared to integers, they are converted to an integer +- Floating point to floating point comparison leads to a loss of precision for very big floating point numbers +(e.g., `10000000000000000000.0` is treated the same as `10000000000000000000.6`) +- When floating points do get converted to integers, they are always rounded down. +This has been done to preserve the behaviour present before introducing the support for BigInts in the query parameters. + ## ValidatorSetUpdates When validator set changes, ValidatorSetUpdates event is published. The diff --git a/go.mod b/go.mod index 1655a96ceb..b10140f2a4 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.13.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 ) require ( @@ -54,7 +54,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.0 github.com/btcsuite/btcd/btcec/v2 v2.2.1 github.com/btcsuite/btcd/btcutil v1.1.2 - github.com/celestiaorg/nmt v0.16.0 + github.com/celestiaorg/nmt v0.19.0 github.com/cometbft/cometbft-db v0.7.0 github.com/go-git/go-git/v5 v5.5.1 github.com/vektra/mockery/v2 v2.14.0 @@ -62,7 +62,14 @@ require ( google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 ) -require github.com/influxdata/influxdb-client-go/v2 v2.12.2 +require ( + github.com/influxdata/influxdb-client-go/v2 v2.12.2 + github.com/pyroscope-io/client v0.7.2 + github.com/pyroscope-io/otel-profiling-go v0.4.0 + go.opentelemetry.io/otel v1.15.1 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 + go.opentelemetry.io/otel/sdk v1.15.1 +) 
require ( 4d63.com/gochecknoglobals v0.1.0 // indirect @@ -131,7 +138,7 @@ require ( github.com/go-critic/go-critic v0.6.5 // indirect github.com/go-git/gcfg v1.5.0 // indirect github.com/go-git/go-billy/v5 v5.3.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect github.com/go-toolsmith/astcopy v1.0.2 // indirect @@ -229,6 +236,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/pyroscope-io/godeltaprof v0.1.2 // indirect github.com/quasilyte/go-ruleguard v0.3.18 // indirect github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect @@ -276,9 +284,8 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect - go.opentelemetry.io/otel v1.11.0 // indirect go.opentelemetry.io/otel/metric v0.32.3 // indirect - go.opentelemetry.io/otel/trace v1.11.0 // indirect + go.opentelemetry.io/otel/trace v1.15.1 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.23.0 // indirect @@ -286,7 +293,7 @@ require ( golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.5.0 // indirect + golang.org/x/sys v0.7.0 // indirect golang.org/x/term v0.5.0 // indirect golang.org/x/text v0.7.0 // indirect golang.org/x/tools v0.4.0 // indirect diff --git a/go.sum b/go.sum index 6244430c7e..6b38d52da8 100644 --- a/go.sum +++ b/go.sum @@ -150,8 +150,8 @@ github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnu 
github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/celestiaorg/nmt v0.16.0 h1:4CX6d1Uwf1C+tGcAWskPve0HCDTnI4Ey8ffjiDwcGH0= -github.com/celestiaorg/nmt v0.16.0/go.mod h1:GfwIvQPhUakn1modWxJ+rv8dUjJzuXg5H+MLFM1o7nY= +github.com/celestiaorg/nmt v0.19.0 h1:9VXFeI/gt+q8h5HeCE0RjXJhOxsFzxJUjHrkvF9CMYE= +github.com/celestiaorg/nmt v0.19.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= @@ -311,8 +311,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -423,6 +423,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -744,6 +745,12 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/pyroscope-io/client v0.7.2 h1:OX2qdUQsS8RSkn/3C8isD7f/P0YiZQlRbAlecAaj/R8= +github.com/pyroscope-io/client v0.7.2/go.mod h1:FEocnjn+Ngzxy6EtU9ZxXWRvQ0+pffkrBxHLnPpxwi8= +github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= +github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= +github.com/pyroscope-io/otel-profiling-go v0.4.0 h1:Hk/rbUqOWoByoWy1tt4r5BX5xoKAvs5drr0511Ki8ic= +github.com/pyroscope-io/otel-profiling-go v0.4.0/go.mod h1:MXaofiWU7PgLP7eISUZJYVO4Z8WYMqpkYgeP4XrPLyg= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE= github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs= @@ -855,8 +862,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -872,6 +879,9 @@ github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpR github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/timonwong/loggercheck v0.9.3 h1:ecACo9fNiHxX4/Bc02rW2+kaJIAMAes7qJ7JKxt0EZI= @@ -928,12 +938,18 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw= -go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= -go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= +go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= +go.opentelemetry.io/otel v1.15.1 h1:3Iwq3lfRByPaws0f6bU3naAqOR1n5IeDWd9390kWHa8= +go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 h1:2PunuO5SbkN5MhCbuHCd3tC6qrcaj+uDAkX/qBU5BAs= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1/go.mod h1:q8+Tha+5LThjeSU8BW93uUC5w5/+DnYHMKBMpRCsui0= go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c= go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc= -go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= -go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= +go.opentelemetry.io/otel/sdk v1.15.1 h1:5FKR+skgpzvhPQHIEfcwMYjCBr14LWzs3uSqKiQzETI= +go.opentelemetry.io/otel/sdk v1.15.1/go.mod h1:8rVtxQfrbmbHKfqzpQkT5EzZMcbMBwTzNAggbEAM0KA= +go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= +go.opentelemetry.io/otel/trace v1.15.1 h1:uXLo6iHJEzDfrNC0L0mNjItIp06SyaBQxu5t3xMlngY= +go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 
h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= @@ -1173,8 +1189,8 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 83829cbe49..1819c542fa 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -11,6 +11,7 @@ package query import ( "fmt" + "math/big" "reflect" "regexp" "strconv" @@ -151,16 +152,17 @@ func (q *Query) Conditions() ([]Condition, error) { conditions = append(conditions, Condition{eventAttr, op, value}) } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, + valueBig := new(big.Int) + _, ok := valueBig.SetString(number, 10) + if !ok { + err := fmt.Errorf( + "problem parsing %s as bigint (should never happen if the grammar is correct)", + number, ) return nil, err } + conditions = append(conditions, Condition{eventAttr, op, valueBig}) - conditions = append(conditions, 
Condition{eventAttr, op, value}) } case ruletime: @@ -298,11 +300,12 @@ func (q *Query) Matches(events map[string][]string) (bool, error) { return false, nil } } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, + value := new(big.Int) + _, ok := value.SetString(number, 10) + if !ok { + err := fmt.Errorf( + "problem parsing %s as bigInt (should never happen if the grammar is correct)", + number, ) return false, err } @@ -451,42 +454,58 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) return v == operandFloat64, nil } - case reflect.Int64: - var v int64 + case reflect.Pointer: - operandInt := operand.Interface().(int64) - filteredValue := numRegex.FindString(value) + switch operand.Interface().(type) { + case *big.Int: + filteredValue := numRegex.FindString(value) + operandVal := operand.Interface().(*big.Int) + v := new(big.Int) + if strings.ContainsAny(filteredValue, ".") { + // We do this just to check whether the string can be parsed as a float + _, err := strconv.ParseFloat(filteredValue, 64) + if err != nil { + err = fmt.Errorf( + "got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", + err, filteredValue, + ) + return false, err + } - // if value looks like float, we try to parse it as float - if strings.ContainsAny(filteredValue, ".") { - v1, err := strconv.ParseFloat(filteredValue, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to float64: %w", filteredValue, err) - } + // If yes, we get the int part of the string. + // We could simply cast the float to an int and use that to create a big int but + // if it is a number bigger than int64, it will not be parsed properly. 
+ // If we use bigFloat and convert that to a string, the values will be rounded which + // is not what we want either. + // Here we are simulating the behavior that int64(floatValue). This was the default behavior + // before introducing BigInts and we do not want to break the logic in minor releases. + _, ok := v.SetString(strings.Split(filteredValue, ".")[0], 10) + if !ok { + return false, fmt.Errorf("failed to convert value %s from float to big int", filteredValue) + } + } else { + // try our best to convert value from tags to big int + _, ok := v.SetString(filteredValue, 10) + if !ok { + return false, fmt.Errorf("failed to convert value %v from event attribute to big int", filteredValue) + } - v = int64(v1) - } else { - var err error - // try our best to convert value from tags to int64 - v, err = strconv.ParseInt(filteredValue, 10, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to int64: %w", filteredValue, err) } - } + cmpRes := operandVal.Cmp(v) + switch op { + case OpLessEqual: + return cmpRes == 0 || cmpRes == 1, nil + case OpGreaterEqual: + return cmpRes == 0 || cmpRes == -1, nil + case OpLess: + return cmpRes == 1, nil + case OpGreater: + return cmpRes == -1, nil + case OpEqual: + return cmpRes == 0, nil + } - switch op { - case OpLessEqual: - return v <= operandInt, nil - case OpGreaterEqual: - return v >= operandInt, nil - case OpLess: - return v < operandInt, nil - case OpGreater: - return v > operandInt, nil - case OpEqual: - return v == operandInt, nil } - case reflect.String: switch op { case OpEqual: diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go index d511e7fab8..34ea0d0c00 100644 --- a/libs/pubsub/query/query_test.go +++ b/libs/pubsub/query/query_test.go @@ -2,6 +2,7 @@ package query_test import ( "fmt" + "math/big" "testing" "time" @@ -11,6 +12,57 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) +func TestBigNumbers(t *testing.T) { + bigInt := 
"10000000000000000000" + bigIntAsFloat := "10000000000000000000.0" + bigFloat := "10000000000000000000.6" + bigFloatLowerRounding := "10000000000000000000.1" + doubleBigInt := "20000000000000000000" + + testCases := []struct { + s string + events map[string][]string + err bool + matches bool + matchErr bool + }{ + + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {bigInt}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {bigIntAsFloat}}, false, true, false}, + {"account.balance <= " + doubleBigInt, map[string][]string{"account.balance": {bigInt}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {"10000000000000000001"}}, false, false, false}, + {"account.balance <= " + doubleBigInt, map[string][]string{"account.balance": {bigFloat}}, false, true, false}, + // To maintain compatibility with the old implementation which did a simple cast of float to int64, we do not round the float + // Thus both 10000000000000000000.6 and "10000000000000000000.1 are equal to 10000000000000000000 + // and the test does not find a match + {"account.balance > " + bigInt, map[string][]string{"account.balance": {bigFloat}}, false, false, false}, + {"account.balance > " + bigInt, map[string][]string{"account.balance": {bigFloatLowerRounding}}, true, false, false}, + // This test should also find a match, but floats that are too big cannot be properly converted, thus + // 10000000000000000000.6 gets rounded to 10000000000000000000 + {"account.balance > " + bigIntAsFloat, map[string][]string{"account.balance": {bigFloat}}, false, false, false}, + {"account.balance > 11234.0", map[string][]string{"account.balance": {"11234.6"}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {"1000.45"}}, false, true, false}, + } + + for _, tc := range testCases { + q, err := query.New(tc.s) + if !tc.err { + require.Nil(t, err) + } + 
require.NotNil(t, q, "Query '%s' should not be nil", tc.s) + + if tc.matches { + match, err := q.Matches(tc.events) + assert.Nil(t, err, "Query '%s' should not error on match %v", tc.s, tc.events) + assert.True(t, match, "Query '%s' should match %v", tc.s, tc.events) + } else { + match, err := q.Matches(tc.events) + assert.Equal(t, tc.matchErr, err != nil, "Unexpected error for query '%s' match %v", tc.s, tc.events) + assert.False(t, match, "Query '%s' should not match %v", tc.s, tc.events) + } + } +} + func TestMatches(t *testing.T) { var ( txDate = "2017-01-01" @@ -180,6 +232,10 @@ func TestConditions(t *testing.T) { txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") require.NoError(t, err) + bigInt := new(big.Int) + bigInt, ok := bigInt.SetString("10000000000000000000", 10) + require.True(t, ok) + testCases := []struct { s string conditions []query.Condition @@ -193,8 +249,24 @@ func TestConditions(t *testing.T) { { s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{ - {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, - {CompositeKey: "tx.gas", Op: query.OpLess, Operand: int64(9)}, + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: big.NewInt(7)}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, + }, + }, + { + + s: "tx.gas > 7.5 AND tx.gas < 9", + conditions: []query.Condition{ + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: 7.5}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, + }, + }, + { + + s: "tx.gas > " + bigInt.String() + " AND tx.gas < 9", + conditions: []query.Condition{ + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: bigInt}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, }, }, { diff --git a/mempool/cat/reactor.go b/mempool/cat/reactor.go index fb0adbf4b6..0f7cc642e9 100644 --- a/mempool/cat/reactor.go +++ b/mempool/cat/reactor.go @@ -12,6 +12,8 @@ import ( "github.com/tendermint/tendermint/libs/log" 
"github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/pkg/trace/schema" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/types" ) @@ -35,10 +37,11 @@ const ( // spec under /.spec.md type Reactor struct { p2p.BaseReactor - opts *ReactorOptions - mempool *TxPool - ids *mempoolIDs - requests *requestScheduler + opts *ReactorOptions + mempool *TxPool + ids *mempoolIDs + requests *requestScheduler + traceClient *trace.Client } type ReactorOptions struct { @@ -52,6 +55,9 @@ type ReactorOptions struct { // MaxGossipDelay is the maximum allotted time that the reactor expects a transaction to // arrive before issuing a new request to a different peer MaxGossipDelay time.Duration + + // TraceClient is the trace client for collecting trace level events + TraceClient *trace.Client } func (opts *ReactorOptions) VerifyAndComplete() error { @@ -81,10 +87,11 @@ func NewReactor(mempool *TxPool, opts *ReactorOptions) (*Reactor, error) { return nil, err } memR := &Reactor{ - opts: opts, - mempool: mempool, - ids: newMempoolIDs(), - requests: newRequestScheduler(opts.MaxGossipDelay, defaultGlobalRequestTimeout), + opts: opts, + mempool: mempool, + ids: newMempoolIDs(), + requests: newRequestScheduler(opts.MaxGossipDelay, defaultGlobalRequestTimeout), + traceClient: &trace.Client{}, } memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) return memR, nil @@ -203,6 +210,9 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { // NOTE: This setup also means that we can support older mempool implementations that simply // flooded the network with transactions. 
case *protomem.Txs: + for _, tx := range msg.Txs { + schema.WriteMempoolTx(memR.traceClient, e.Src.ID(), tx, schema.TransferTypeDownload, schema.CatVersionFieldValue) + } protoTxs := msg.GetTxs() if len(protoTxs) == 0 { memR.Logger.Error("received empty txs from peer", "src", e.Src) @@ -245,6 +255,13 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { // 3. If we recently evicted the tx and still don't have space for it, we do nothing. // 4. Else, we request the transaction from that peer. case *protomem.SeenTx: + schema.WriteMempoolPeerState( + memR.traceClient, + e.Src.ID(), + schema.SeenTxStateUpdateFieldValue, + schema.TransferTypeDownload, + schema.CatVersionFieldValue, + ) txKey, err := types.TxKeyFromBytes(msg.TxKey) if err != nil { memR.Logger.Error("peer sent SeenTx with incorrect tx key", "err", err) @@ -272,6 +289,13 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { // A peer is requesting a transaction that we have claimed to have. Find the specified // transaction and broadcast it to the peer. 
We may no longer have the transaction case *protomem.WantTx: + schema.WriteMempoolPeerState( + memR.traceClient, + e.Src.ID(), + schema.WantTxStateUpdateFieldValue, + schema.TransferTypeDownload, + schema.CatVersionFieldValue, + ) txKey, err := types.TxKeyFromBytes(msg.TxKey) if err != nil { memR.Logger.Error("peer sent WantTx with incorrect tx key", "err", err) @@ -281,6 +305,13 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { tx, has := memR.mempool.Get(txKey) if has && !memR.opts.ListenOnly { peerID := memR.ids.GetIDForPeer(e.Src.ID()) + schema.WriteMempoolTx( + memR.traceClient, + e.Src.ID(), + msg.TxKey, + schema.TransferTypeUpload, + schema.CatVersionFieldValue, + ) memR.Logger.Debug("sending a tx in response to a want msg", "peer", peerID) if p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint:staticcheck ChannelID: mempool.MempoolChannel, diff --git a/mempool/v0/clist_mempool.go b/mempool/v0/clist_mempool.go index 65365fa63d..07e9cb484c 100644 --- a/mempool/v0/clist_mempool.go +++ b/mempool/v0/clist_mempool.go @@ -395,6 +395,20 @@ func (mem *CListMempool) resCbFirstTime( return } + // Check transaction not already in the mempool + if e, ok := mem.txsMap.Load(types.Tx(tx).Key()); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) + memTx.senders.LoadOrStore(peerID, true) + mem.logger.Debug( + "transaction already there, not adding it again", + "tx", types.Tx(tx).Hash(), + "res", r, + "height", mem.height, + "total", mem.Size(), + ) + return + } + memTx := &mempoolTx{ height: mem.height, gasWanted: r.CheckTx.GasWanted, diff --git a/mempool/v0/clist_mempool_test.go b/mempool/v0/clist_mempool_test.go index 824a24a1af..f00413a89f 100644 --- a/mempool/v0/clist_mempool_test.go +++ b/mempool/v0/clist_mempool_test.go @@ -7,6 +7,7 @@ import ( "fmt" mrand "math/rand" "os" + "strconv" "testing" "time" @@ -641,6 +642,51 @@ func TestMempoolTxsBytes(t *testing.T) { } +func TestMempoolNoCacheOverflow(t *testing.T) { + sockPath := 
fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6)) + app := kvstore.NewApplication() + _, server := newRemoteApp(t, sockPath, app) + t.Cleanup(func() { + if err := server.Stop(); err != nil { + t.Error(err) + } + }) + cfg := config.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg) + defer cleanup() + + // add tx0 + var tx0 = types.Tx([]byte{0x01}) + err := mp.CheckTx(tx0, nil, mempool.TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // saturate the cache to remove tx0 + for i := 1; i <= mp.config.CacheSize; i++ { + err = mp.CheckTx(types.Tx([]byte(strconv.Itoa(i))), nil, mempool.TxInfo{}) + require.NoError(t, err) + } + err = mp.FlushAppConn() + require.NoError(t, err) + assert.False(t, mp.cache.Has(types.Tx([]byte{0x01}))) + + // add again tx0 + err = mp.CheckTx(tx0, nil, mempool.TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // tx0 should appear only once in mp.txs + found := 0 + for e := mp.txs.Front(); e != nil; e = e.Next() { + if types.Tx.Key(e.Value.(*mempoolTx).tx) == types.Tx.Key(tx0) { + found++ + } + } + assert.True(t, found == 1) +} + // This will non-deterministically catch some concurrency failures like // https://github.com/tendermint/tendermint/issues/3509 // TODO: all of the tests should probably also run using the remote proxy app diff --git a/mempool/v1/reactor.go b/mempool/v1/reactor.go index 8a7ddca697..029faf7e55 100644 --- a/mempool/v1/reactor.go +++ b/mempool/v1/reactor.go @@ -13,6 +13,8 @@ import ( cmtsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/pkg/trace/schema" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/types" ) @@ -22,9 +24,10 @@ import 
( // peers you received it from. type Reactor struct { p2p.BaseReactor - config *cfg.MempoolConfig - mempool *TxMempool - ids *mempoolIDs + config *cfg.MempoolConfig + mempool *TxMempool + ids *mempoolIDs + traceClient *trace.Client } type mempoolIDs struct { @@ -91,11 +94,12 @@ func newMempoolIDs() *mempoolIDs { } // NewReactor returns a new Reactor with the given config and mempool. -func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor { +func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool, traceClient *trace.Client) *Reactor { memR := &Reactor{ - config: config, - mempool: mempool, - ids: newMempoolIDs(), + config: config, + mempool: mempool, + ids: newMempoolIDs(), + traceClient: traceClient, } memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) return memR @@ -160,6 +164,15 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) switch msg := e.Message.(type) { case *protomem.Txs: + for _, tx := range msg.Txs { + schema.WriteMempoolTx( + memR.traceClient, + e.Src.ID(), + tx, + schema.TransferTypeDownload, + schema.V1VersionFieldValue, + ) + } protoTxs := msg.GetTxs() if len(protoTxs) == 0 { memR.Logger.Error("received tmpty txs from peer", "src", e.Src) @@ -270,6 +283,13 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) continue } + schema.WriteMempoolTx( + memR.traceClient, + peer.ID(), + memTx.tx, + schema.TransferTypeUpload, + schema.V1VersionFieldValue, + ) } select { diff --git a/mempool/v1/reactor_test.go b/mempool/v1/reactor_test.go index 74f9f469ff..4f0c5e391a 100644 --- a/mempool/v1/reactor_test.go +++ b/mempool/v1/reactor_test.go @@ -14,6 +14,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/p2p/mock" + "github.com/tendermint/tendermint/pkg/trace" cfg "github.com/tendermint/tendermint/config" @@ -133,7 
+134,7 @@ func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { mempool, cleanup := newMempoolWithApp(cc) defer cleanup() - reactors[i] = NewReactor(config.Mempool, mempool) // so we dont start the consensus states + reactors[i] = NewReactor(config.Mempool, mempool, &trace.Client{}) // so we dont start the consensus states reactors[i].SetLogger(logger.With("validator", i)) } diff --git a/networks/local/localnode/Dockerfile b/networks/local/localnode/Dockerfile index e1c3c45270..197d07a70c 100644 --- a/networks/local/localnode/Dockerfile +++ b/networks/local/localnode/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.7 +FROM alpine:3.14 RUN apk update && \ apk upgrade && \ diff --git a/node/node.go b/node/node.go index 2db2ec86f5..c19efe88c3 100644 --- a/node/node.go +++ b/node/node.go @@ -13,7 +13,9 @@ import ( dbm "github.com/cometbft/cometbft-db" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/pyroscope-io/client/pyroscope" "github.com/rs/cors" + sdktrace "go.opentelemetry.io/otel/sdk/trace" abci "github.com/tendermint/tendermint/abci/types" bcv0 "github.com/tendermint/tendermint/blockchain/v0" @@ -117,17 +119,17 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { } // MetricsProvider returns a consensus, p2p and mempool Metrics. -type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) +type MetricsProvider func(chainID, softwareVersion string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) // DefaultMetricsProvider returns Metrics build using Prometheus client library // if Prometheus is enabled. Otherwise, it returns no-op Metrics. 
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { - return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { + return func(chainID, softwareVersion string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { if config.Prometheus { - return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), - p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), - mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), - sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) + return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion), + p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion), + mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion), + sm.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion) } return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() } @@ -233,6 +235,8 @@ type Node struct { indexerService *txindex.IndexerService prometheusSrv *http.Server influxDBClient *trace.Client + pyroscopeProfiler *pyroscope.Profiler + pyroscopeTracer *sdktrace.TracerProvider } func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { @@ -318,6 +322,7 @@ func createAndStartIndexerService( } func doHandshake( + ctx context.Context, stateStore sm.Store, state sm.State, blockStore sm.BlockStore, @@ -325,14 +330,11 @@ func doHandshake( eventBus types.BlockEventPublisher, proxyApp proxy.AppConns, consensusLogger log.Logger, -) error { +) (string, error) { handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(proxyApp); err != nil { - return fmt.Errorf("error during handshake: %v", err) - } - return nil + return handshaker.Handshake(proxyApp) } func 
logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { @@ -376,6 +378,7 @@ func createMempoolAndMempoolReactor( state sm.State, memplMetrics *mempl.Metrics, logger log.Logger, + traceClient *trace.Client, ) (mempl.Mempool, p2p.Reactor) { switch config.Mempool.Version { case cfg.MempoolV2: @@ -392,8 +395,9 @@ func createMempoolAndMempoolReactor( reactor, err := mempoolv2.NewReactor( mp, &mempoolv2.ReactorOptions{ - ListenOnly: !config.Mempool.Broadcast, - MaxTxSize: config.Mempool.MaxTxBytes, + ListenOnly: !config.Mempool.Broadcast, + MaxTxSize: config.Mempool.MaxTxBytes, + TraceClient: traceClient, }, ) if err != nil { @@ -420,6 +424,7 @@ func createMempoolAndMempoolReactor( reactor := mempoolv1.NewReactor( config.Mempool, mp, + traceClient, ) if config.Consensus.WaitForTxs() { mp.EnableTxsAvailable() @@ -508,7 +513,7 @@ func createConsensusReactor(config *cfg.Config, waitSync bool, eventBus *types.EventBus, consensusLogger log.Logger, - evCollector *trace.Client, + traceClient *trace.Client, ) (*cs.Reactor, *cs.State) { consensusState := cs.NewState( config.Consensus, @@ -518,13 +523,18 @@ func createConsensusReactor(config *cfg.Config, mempool, evidencePool, cs.StateMetrics(csMetrics), - cs.SetEventCollector(evCollector), + cs.SetTraceClient(traceClient), ) consensusState.SetLogger(consensusLogger) if privValidator != nil { consensusState.SetPrivValidator(privValidator) } - consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) + consensusReactor := cs.NewReactor( + consensusState, + waitSync, + cs.ReactorMetrics(csMetrics), + cs.ReactorTracing(traceClient), + ) consensusReactor.SetLogger(consensusLogger) // services which will be publishing and/or subscribing for messages (events) // consensusReactor will set it on consensusState and blockExecutor @@ -814,8 +824,10 @@ func NewNode(config *cfg.Config, // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, // 
and replays any blocks as necessary to sync CometBFT with the app. consensusLogger := logger.With("module", "consensus") + var softwareVersion string if !stateSync { - if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + softwareVersion, err = doHandshake(context.TODO(), stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger) + if err != nil { return nil, err } @@ -826,6 +838,12 @@ func NewNode(config *cfg.Config, if err != nil { return nil, fmt.Errorf("cannot load state: %w", err) } + } else { + resp, err := proxyApp.Query().InfoSync(proxy.RequestInfo) + if err != nil { + return nil, fmt.Errorf("error during info call: %w", err) + } + softwareVersion = resp.Version } // Determine whether we should do fast sync. This must happen after the handshake, since the @@ -834,7 +852,7 @@ func NewNode(config *cfg.Config, logNodeStartupInfo(state, pubKey, logger, consensusLogger) - csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) + csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID, softwareVersion) // create an optional influxdb client to send arbitrary data to a remote // influxdb server. 
This is used to collect trace data from many different nodes @@ -850,7 +868,7 @@ func NewNode(config *cfg.Config, } // Make MempoolReactor - mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) + mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger, influxdbClient) // Make Evidence Reactor evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) @@ -898,7 +916,7 @@ func NewNode(config *cfg.Config, ) stateSyncReactor.SetLogger(logger.With("module", "statesync")) - nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) + nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state, softwareVersion) if err != nil { return nil, err } @@ -1019,6 +1037,18 @@ func (n *Node) OnStart() error { n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) } + if n.config.Instrumentation.PyroscopeURL != "" { + profiler, tracer, err := setupPyroscope( + n.config.Instrumentation, + string(n.nodeKey.ID()), + ) + if err != nil { + return err + } + n.pyroscopeProfiler = profiler + n.pyroscopeTracer = tracer + } + // Start the transport. 
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress)) if err != nil { @@ -1105,12 +1135,14 @@ func (n *Node) OnStop() { } if n.blockStore != nil { + n.Logger.Info("Closing blockstore") if err := n.blockStore.Close(); err != nil { n.Logger.Error("problem closing blockstore", "err", err) } } if n.stateStore != nil { + n.Logger.Info("Closing statestore") if err := n.stateStore.Close(); err != nil { n.Logger.Error("problem closing statestore", "err", err) } @@ -1119,6 +1151,25 @@ func (n *Node) OnStop() { if n.influxDBClient != nil { n.influxDBClient.Stop() } + + if n.pyroscopeProfiler != nil { + if err := n.pyroscopeProfiler.Stop(); err != nil { + n.Logger.Error("Pyroscope profiler Stop", "err", err) + } + } + + if n.pyroscopeTracer != nil { + if err := n.pyroscopeTracer.Shutdown(context.Background()); err != nil { + n.Logger.Error("Pyroscope tracer Shutdown", "err", err) + } + } + + if n.evidencePool != nil { + n.Logger.Info("Closing evidencestore") + if err := n.EvidencePool().Close(); err != nil { + n.Logger.Error("problem closing evidencestore", "err", err) + } + } } // ConfigureRPC makes sure RPC has all the objects it needs to operate. 
@@ -1383,6 +1434,7 @@ func makeNodeInfo( txIndexer txindex.TxIndexer, genDoc *types.GenesisDoc, state sm.State, + softwareVersion string, ) (p2p.DefaultNodeInfo, error) { txIndexerStatus := "on" if _, ok := txIndexer.(*null.TxIndex); ok { @@ -1409,7 +1461,7 @@ func makeNodeInfo( ), DefaultNodeID: nodeKey.ID(), Network: genDoc.ChainID, - Version: version.TMCoreSemVer, + Version: softwareVersion, Channels: []byte{ bcChannel, cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, diff --git a/node/tracing.go b/node/tracing.go new file mode 100644 index 0000000000..4e2e00f76e --- /dev/null +++ b/node/tracing.go @@ -0,0 +1,85 @@ +package node + +import ( + "github.com/pyroscope-io/client/pyroscope" + "github.com/tendermint/tendermint/config" + + otelpyroscope "github.com/pyroscope-io/otel-profiling-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/propagation" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// setupPyroscope sets up pyroscope profiler and optionally tracing. 
+func setupPyroscope(instCfg *config.InstrumentationConfig, nodeID string) (*pyroscope.Profiler, *sdktrace.TracerProvider, error) { + tp, err := tracerProviderDebug() + if err != nil { + return nil, nil, err + } + + labels := map[string]string{"node_id": nodeID} + + if instCfg.PyroscopeTrace { + if _, err = setupTracing(instCfg.PyroscopeURL, labels); err != nil { + return nil, nil, err + } + } else { + tp = nil + } + + pflr, err := pyroscope.Start(pyroscope.Config{ + ApplicationName: "celestia", + ServerAddress: instCfg.PyroscopeURL, + Logger: nil, // use the noop logger by passing nil + Tags: labels, + ProfileTypes: toPyroscopeProfiles(instCfg.PyroscopeProfileTypes), + }) + + return pflr, tp, err +} + +func setupTracing(addr string, labels map[string]string) (tp *sdktrace.TracerProvider, err error) { + tp, err = tracerProviderDebug() + if err != nil { + return nil, err + } + + // Set the Tracer Provider and the W3C Trace Context propagator as globals. + // We wrap the tracer provider to also annotate goroutines with Span ID so + // that pprof would add corresponding labels to profiling samples. + otel.SetTracerProvider(otelpyroscope.NewTracerProvider(tp, + otelpyroscope.WithAppName("celestia"), + otelpyroscope.WithRootSpanOnly(true), + otelpyroscope.WithAddSpanName(true), + otelpyroscope.WithPyroscopeURL(addr), + otelpyroscope.WithProfileBaselineLabels(labels), + otelpyroscope.WithProfileBaselineURL(true), + otelpyroscope.WithProfileURL(true), + )) + + // Register the trace context and baggage propagators so data is propagated across services/processes. 
+ otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + )) + + return tp, err +} + +func tracerProviderDebug() (*sdktrace.TracerProvider, error) { + exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) + if err != nil { + return nil, err + } + return sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(exp))), nil +} + +func toPyroscopeProfiles(profiles []string) []pyroscope.ProfileType { + pts := make([]pyroscope.ProfileType, 0, len(profiles)) + for _, p := range profiles { + pts = append(pts, pyroscope.ProfileType(p)) + } + return pts +} diff --git a/p2p/metrics.go b/p2p/metrics.go index 7c80658e5d..2d02d386b8 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -87,13 +87,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Subsystem: MetricsSubsystem, Name: "message_receive_bytes_total", Help: "Number of bytes of each message type received.", - }, append(labels, "message_type")).With(labelsAndValues...), + }, append(labels, "message_type", "chID")).With(labelsAndValues...), MessageSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "message_send_bytes_total", Help: "Number of bytes of each message type sent.", - }, append(labels, "message_type")).With(labelsAndValues...), + }, append(labels, "message_type", "chID")).With(labelsAndValues...), } } diff --git a/p2p/peer.go b/p2p/peer.go index 49c0dd4e40..4de7148df6 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -331,7 +331,8 @@ func (p *peer) SendEnvelope(e Envelope) bool { } res := p.Send(e.ChannelID, msgBytes) if res { - p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) + p.metrics.MessageSendBytesTotal.With("message_type", + metricLabelValue, "chID", fmt.Sprintf("%#x", e.ChannelID)).Add(float64(len(msgBytes))) } return res } @@ -380,7 +381,8 @@ func (p 
*peer) TrySendEnvelope(e Envelope) bool { } res := p.TrySend(e.ChannelID, msgBytes) if res { - p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) + p.metrics.MessageSendBytesTotal.With("message_type", + metricLabelValue, "chID", fmt.Sprintf("%#x", e.ChannelID)).Add(float64(len(msgBytes))) } return res } @@ -532,7 +534,8 @@ func createMConnection( } } p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes))) - p.metrics.MessageReceiveBytesTotal.With("message_type", p.mlc.ValueToMetricLabel(msg)).Add(float64(len(msgBytes))) + p.metrics.MessageReceiveBytesTotal.With("message_type", + p.mlc.ValueToMetricLabel(msg), "chID", fmt.Sprintf("%#x", chID)).Add(float64(len(msgBytes))) if nr, ok := reactor.(EnvelopeReceiver); ok { nr.ReceiveEnvelope(Envelope{ ChannelID: chID, diff --git a/pkg/trace/README.md b/pkg/trace/README.md index 208ba5e5ca..d883e1b413 100644 --- a/pkg/trace/README.md +++ b/pkg/trace/README.md @@ -16,17 +16,17 @@ example, we're pushing a point in the consensus reactor to measure exactly when each step of consensus is reached for each node. ```go -if cs.eventCollector.IsCollecting() { - cs.eventCollector.WritePoint("consensus", map[string]interface{}{ - "roundData": []interface{}{rs.Height, rs.Round, rs.Step}, - }) -} +client.WritePoint(RoundStateTable, map[string]interface{}{ + HeightFieldKey: height, + RoundFieldKey: round, + StepFieldKey: step.String(), +}) ``` Using this method enforces the typical schema, where we are tagging (aka indexing) each point by the chain-id and the node-id, then adding the local time of the creation of the event. If you need to push a custom point, you can use -the underlying client directly. See influxdb2.WriteAPI for more details. +the underlying client directly. See `influxdb2.WriteAPI` for more details. ### Schema @@ -40,19 +40,54 @@ node. 
from(bucket: "e2e") |> range(start: -1h) |> filter( - fn: (r) => r["_measurement"] == "consensus" + fn: (r) => r["_measurement"] == "consensus_round_state" and r.chain_id == "ci-YREG8X" and r.node_id == "0b529c309608172a29c49979394734260b42acfb" ) ``` +We can easily retrieve all fields in a relatively standard table format by using +the pivot `fluxQL` command. + +```flux +from(bucket: "mocha") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "consensus_round_state") + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") +``` + +### Querying Data Using Python + +Python can be used to quickly search for and isolate specific patterns. + +```python +from influxdb_client import InfluxDBClient +from influxdb_client.client.write_api import SYNCHRONOUS + +client = InfluxDBClient(url="http://your-influx-url:8086/", token="your-influx-token", org="celestia") + +query_api = client.query_api() + +def create_flux_table_query(start, bucket, measurement, filter_clause): + flux_table_query = f''' + from(bucket: "{bucket}") + |> range(start: {start}) + |> filter(fn: (r) => r._measurement == "{measurement}") + {filter_clause} + |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") + ''' + return flux_table_query + +query = create_flux_table_query("-1h", "mocha", "consensus_round_state", "") +result = query_api.query(query=query) +``` ### Running a node with remote tracing on Tracing will only occur if an influxdb URL in specified either directly in the `config.toml` or as flags provided to the start sub command. -configure in the config.toml +#### Configure in the `config.toml` ```toml ####################################################### @@ -62,7 +97,7 @@ configure in the config.toml ... -# The URL of the influxdb instance to use for remote event +# The URL of the influxdb instance to use for remote event # collection. If empty, remote event collection is disabled.
influx_url = "http://your-influx-ip:8086/" @@ -77,9 +112,15 @@ influx_org = "celestia" # The size of the batches that are sent to the database. influx_batch_size = 20 + +# The list of tables that are updated when tracing. All available tables and +# their schema can be found in the pkg/trace/schema package. +influx_tables = ["consensus_round_state", "mempool_tx", ] + ``` -or +or + ```sh celestia-appd start --influxdb-url=http://your-influx-ip:8086/ --influxdb-token="your-token" ``` diff --git a/pkg/trace/client.go b/pkg/trace/client.go index a3939a88a8..0ee70aebed 100644 --- a/pkg/trace/client.go +++ b/pkg/trace/client.go @@ -16,9 +16,9 @@ const ( ChainIDTag = "chain_id" ) -// EventCollectorConfig is the influxdb client configuration used for +// ClientConfigConfig is the influxdb client configuration used for // collecting events. -type EventCollectorConfig struct { +type ClientConfigConfig struct { // URL is the influxdb url. URL string `mapstructure:"influx_url"` // Token is the influxdb token. @@ -31,16 +31,6 @@ type EventCollectorConfig struct { BatchSize int `mapstructure:"influx_batch_size"` } -// DefaultEventCollectorConfig returns the default configuration. -func DefaultEventCollectorConfig() EventCollectorConfig { - return EventCollectorConfig{ - URL: "", - Org: "celestia", - Bucket: "e2e", - BatchSize: 10, - } -} - // Client is an influxdb client that can be used to push events to influxdb. It // is used to collect trace data from many different nodes in a network. If // there is no URL in the config.toml, then the underlying client is nil and no @@ -58,6 +48,10 @@ type Client struct { // nodeID is added as a tag all points nodeID string + // tables is a map from table name to the schema of that table that are + // configured to be collected. + tables map[string]struct{} + // Client is the influxdb client. This field is nil if no connection is // established. 
Client influxdb2.Client @@ -87,8 +81,9 @@ func NewClient(cfg *config.InstrumentationConfig, logger log.Logger, chainID, no cancel: cancel, chainID: chainID, nodeID: nodeID, + tables: sliceToMap(cfg.InfluxTables), } - if cfg == nil || cfg.InfluxURL == "" { + if cfg.InfluxURL == "" { return cli, nil } cli.Client = influxdb2.NewClientWithOptions( @@ -125,8 +120,12 @@ func (c *Client) logErrors(logger log.Logger) { } // IsCollecting returns true if the client is collecting events. -func (c *Client) IsCollecting() bool { - return c.Client != nil +func (c *Client) IsCollecting(table string) bool { + if c.Client == nil { + return false + } + _, has := c.tables[table] + return has } // WritePoint async writes a point to influxdb. To enforce the schema, it @@ -135,7 +134,7 @@ func (c *Client) IsCollecting() bool { // nothing. The "table" arg is used as the influxdb "measurement" for the point. // If other tags are needed, use WriteCustomPoint. func (c *Client) WritePoint(table string, fields map[string]interface{}) { - if !c.IsCollecting() { + if !c.IsCollecting(table) { return } writeAPI := c.Client.WriteAPI(c.cfg.InfluxOrg, c.cfg.InfluxBucket) @@ -146,3 +145,11 @@ func (c *Client) WritePoint(table string, fields map[string]interface{}) { p := write.NewPoint(table, tags, fields, time.Now()) writeAPI.WritePoint(p) } + +func sliceToMap(tables []string) map[string]struct{} { + m := make(map[string]struct{}) + for _, s := range tables { + m[s] = struct{}{} + } + return m +} diff --git a/pkg/trace/doc.go b/pkg/trace/doc.go index 9372af5839..3d6521464e 100644 --- a/pkg/trace/doc.go +++ b/pkg/trace/doc.go @@ -18,8 +18,8 @@ each step of consensus is reached for each node. 
```go - if cs.eventCollector.IsCollecting() { - cs.eventCollector.WritePoint("consensus", map[string]interface{}{ + if cs.traceClient.IsCollecting() { + cs.traceClient.WritePoint("consensus", map[string]interface{}{ "roundData": []interface{}{rs.Height, rs.Round, rs.Step}, }) } diff --git a/pkg/trace/flags.go b/pkg/trace/flags.go index a703b427ad..5d8b2a44ad 100644 --- a/pkg/trace/flags.go +++ b/pkg/trace/flags.go @@ -5,4 +5,9 @@ const ( FlagInfluxDBToken = "influxdb-token" FlagInfluxDBURLDescription = "URL of the InfluxDB instance to use for arbitrary data collection. If not specified, data will not be collected" FlagInfluxDBTokenDescription = "Token to use when writing to the InfluxDB instance. Must be specified if 'influxdb-url' is specified" //nolint:gosec + + FlagPyroscopeURL = "pyroscope-url" + FlagPyroscopeURLDescription = "URL of the Pyroscope instance to use for continuous profiling. If not specified, profiling will not be enabled" + FlagPyroscopeTrace = "pyroscope-trace" + FlagPyroscopeTraceDescription = "enable adding trace data to pyroscope profiling" ) diff --git a/pkg/trace/schema/consensus.go b/pkg/trace/schema/consensus.go new file mode 100644 index 0000000000..3fea523853 --- /dev/null +++ b/pkg/trace/schema/consensus.go @@ -0,0 +1,130 @@ +package schema + +import ( + cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/types" +) + +// ConsensusTables returns the list of tables that are used for consensus +// tracing. +func ConsensusTables() []string { + return []string{ + RoundStateTable, + BlockPartsTable, + BlockTable, + } +} + +// Schema constants for the consensus round state tracing database. +const ( + // RoundStateTable is the name of the table that stores the consensus + // state traces. 
Follows this schema: + // + // | time | height | round | step | + RoundStateTable = "consensus_round_state" + + // StepFieldKey is the name of the field that stores the consensus step. The + // value is a string. + StepFieldKey = "step" +) + +// WriteRoundState writes a tracing point for a tx using the predetermined +// schema for consensus state tracing. This is used to create a table in the following +// schema: +// +// | time | height | round | step | +func WriteRoundState(client *trace.Client, height int64, round int32, step cstypes.RoundStepType) { + client.WritePoint(RoundStateTable, map[string]interface{}{ + HeightFieldKey: height, + RoundFieldKey: round, + StepFieldKey: step.String(), + }) +} + +// Schema constants for the "consensus_block_parts" table. +const ( + // BlockPartsTable is the name of the table that stores the consensus block + // parts. + // following schema: + // + // | time | height | round | index | peer | transfer type | + BlockPartsTable = "consensus_block_parts" + + // BlockPartIndexFieldKey is the name of the field that stores the block + // part + BlockPartIndexFieldKey = "index" +) + +// WriteBlockPart writes a tracing point for a BlockPart using the predetermined +// schema for consensus state tracing. This is used to create a table in the +// following schema: +// +// | time | height | round | index | peer | transfer type | +func WriteBlockPart( + client *trace.Client, + height int64, + round int32, + peer p2p.ID, + index uint32, + transferType string, +) { + // this check is redundant to what is checked during WritePoint, although it + // is an optimization to avoid allocations from the map of fields. 
+ if !client.IsCollecting(BlockPartsTable) { + return + } + client.WritePoint(BlockPartsTable, map[string]interface{}{ + HeightFieldKey: height, + RoundFieldKey: round, + BlockPartIndexFieldKey: index, + PeerFieldKey: peer, + TransferTypeFieldKey: transferType, + }) +} + +const ( + // BlockTable is the name of the table that stores metadata about consensus blocks. + // following schema: + // + // | time | height | timestamp | + BlockTable = "consensus_block" + + // UnixMillisecondTimestampFieldKey is the name of the field that stores the timestamp in + // the last commit in unix milliseconds. + UnixMillisecondTimestampFieldKey = "unix_millisecond_timestamp" + + // TxCountFieldKey is the name of the field that stores the number of + // transactions in the block. + TxCountFieldKey = "tx_count" + + // SquareSizeFieldKey is the name of the field that stores the square size + // of the block. SquareSize is the number of shares in a single row or + // column of the origianl data square. + SquareSizeFieldKey = "square_size" + + // BlockSizeFieldKey is the name of the field that stores the size of + // the block data in bytes. + BlockSizeFieldKey = "block_size" + + // ProposerFieldKey is the name of the field that stores the proposer of + // the block. + ProposerFieldKey = "proposer" + + // LastCommitRoundFieldKey is the name of the field that stores the round + // of the last commit. 
+ LastCommitRoundFieldKey = "last_commit_round" +) + +func WriteBlock(client *trace.Client, block *types.Block, size int) { + client.WritePoint(BlockTable, map[string]interface{}{ + HeightFieldKey: block.Height, + UnixMillisecondTimestampFieldKey: block.Time.UnixMilli(), + TxCountFieldKey: len(block.Data.Txs), + SquareSizeFieldKey: block.SquareSize, + BlockSizeFieldKey: size, + ProposerFieldKey: block.ProposerAddress.String(), + LastCommitRoundFieldKey: block.LastCommit.Round, + }) +} diff --git a/pkg/trace/schema/mempool.go b/pkg/trace/schema/mempool.go new file mode 100644 index 0000000000..89c53f102d --- /dev/null +++ b/pkg/trace/schema/mempool.go @@ -0,0 +1,122 @@ +package schema + +import ( + "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/types" +) + +// MempoolTables returns the list of tables for mempool tracing. +func MempoolTables() []string { + return []string{ + MempoolTxTable, + MempoolPeerStateTable, + } +} + +// Schema constants for the mempool_tx table +const ( + // MempoolTxTable is the tracing "measurement" (aka table) for the mempool + // that stores tracing data related to gossiping transactions. + // + // The schema for this table is: + // | time | peerID | tx size | tx hash | transfer type | mempool version | + MempoolTxTable = "mempool_tx" + + // TxFieldKey is the tracing field key for receiving for sending a + // tx. This should take the form of a tx hash as the value. + TxFieldKey = "tx" + + // SizeFieldKey is the tracing field key for the size of a tx. This + // should take the form of the size of the tx as the value. + SizeFieldKey = "size" + + // VersionFieldKey is the tracing field key for the version of the mempool. + // This is used to distinguish between versions of the mempool. + VersionFieldKey = "version" + + // V1VersionFieldValue is a tracing field value for the version of + // the mempool. 
This value is used by the "version" field key. + V1VersionFieldValue = "v1" + + // CatVersionFieldValue is a tracing field value for the version of + // the mempool. This value is used by the "version" field key. + CatVersionFieldValue = "cat" +) + +// WriteMempoolTx writes a tracing point for a tx using the predetermined +// schema for mempool tracing. This is used to create a table in the following +// schema: +// +// | time | peerID | tx size | tx hash | transfer type | mempool version | +func WriteMempoolTx(client *trace.Client, peer p2p.ID, tx []byte, transferType, version string) { + // this check is redundant to what is checked during WritePoint, although it + // is an optimization to avoid allocations from the map of fields. + if !client.IsCollecting(MempoolTxTable) { + return + } + client.WritePoint(MempoolTxTable, map[string]interface{}{ + TxFieldKey: bytes.HexBytes(types.Tx(tx).Hash()).String(), + PeerFieldKey: peer, + SizeFieldKey: len(tx), + TransferTypeFieldKey: transferType, + VersionFieldKey: version, + }) +} + +const ( + // MempoolPeerState is the tracing "measurement" (aka table) for the mempool + // that stores tracing data related to mempool state, specifically + // the gossipping of "SeenTx" and "WantTx". + // + // The schema for this table is: + // | time | peerID | update type | mempool version | + MempoolPeerStateTable = "mempool_peer_state" + + // StateUpdateFieldKey is the tracing field key for state updates of the mempool. + StateUpdateFieldKey = "update" + + // SeenTxStateUpdateFieldValue is a tracing field value for the state + // update of the mempool. This value is used by the "update" field key. + SeenTxStateUpdateFieldValue = "seen_tx" + + // WantTxStateUpdateFieldValue is a tracing field value for the state + // update of the mempool. This value is used by the "update" field key. + WantTxStateUpdateFieldValue = "want_tx" + + // RemovedTxStateUpdateFieldValue is a tracing field value for the local + // state update of the mempool. 
This value is used by the "update" field
+	// key.
+	RemovedTxStateUpdateFieldValue = "removed_tx"
+
+	// AddedTxStateUpdateFieldValue is a tracing field value for the local state
+	// update of the mempool. This value is used by the "update" field key.
+	AddedTxStateUpdateFieldValue = "added_tx"
+)
+
+// WriteMempoolPeerState writes a tracing point for the mempool state using
+// the predetermined schema for mempool tracing. This is used to create a table
+// in the following schema:
+//
+// | time | peerID | transfer type | state update | mempool version |
+func WriteMempoolPeerState(client *trace.Client, peer p2p.ID, stateUpdate, transferType, version string) {
+	// this check is redundant to what is checked during WritePoint, although it
+	// is an optimization to avoid allocations from creating the map of fields.
+	if !client.IsCollecting(MempoolPeerStateTable) {
+		return
+	}
+	client.WritePoint(MempoolPeerStateTable, map[string]interface{}{
+		PeerFieldKey:         peer,
+		TransferTypeFieldKey: transferType,
+		StateUpdateFieldKey:  stateUpdate,
+		VersionFieldKey:      version,
+	})
+}
+
+const (
+// LocalTable is the tracing "measurement" (aka table) for the local mempool
+// updates, such as when a tx is added or removed.
+// TODO: actually implement the local mempool tracing
+// LocalTable = "mempool_local"
+)
diff --git a/pkg/trace/schema/tables.go b/pkg/trace/schema/tables.go
new file mode 100644
index 0000000000..2c8c9ef97d
--- /dev/null
+++ b/pkg/trace/schema/tables.go
@@ -0,0 +1,41 @@
+package schema
+
+import "github.com/tendermint/tendermint/config"
+
+func init() {
+	config.DefaultInfluxTables = AllTables()
+}
+
+func AllTables() []string {
+	tables := []string{}
+	tables = append(tables, MempoolTables()...)
+	tables = append(tables, ConsensusTables()...)
+	return tables
+}
+
+// General purpose schema constants used across multiple tables
+const (
+	// PeerFieldKey is the tracing field key for the peer that sent or
+	// received a tx.
This should take the form of the peer's address as the + // value. + PeerFieldKey = "peer" + + // TransferTypeFieldKey is the tracing field key for the class of a tx. + TransferTypeFieldKey = "transfer_type" + + // TransferTypeDownload is a tracing field value for receiving some + // data from a peer. This value is used by the "TransferType" field key. + TransferTypeDownload = "download" + + // TransferTypeUpload is a tracing field value for sending some data + // to a peer. This value is used by the "TransferType" field key. + TransferTypeUpload = "upload" + + // RoundFieldKey is the name of the field that stores the consensus round. + // The value is an int32. + RoundFieldKey = "round" + + // HeightFieldKey is the name of the field that stores the consensus height. + // The value is an int64. + HeightFieldKey = "height" +) diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 4947a44161..12fd717e8d 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -133,6 +133,14 @@ message RequestPrepareProposal { tendermint.types.Data block_data = 1; // If an application decides to populate block_data with extra information, they can not exceed this value. int64 block_data_size = 2; + // chain_id is a unique identifier for the blockchain network this proposal + // belongs to (e.g. mocha-1). + string chain_id = 3; + // height is the height of the proposal block + int64 height = 4; + // time is the time that will end up in the header. This is the voting power + // weighted median of the last commit. 
+ google.protobuf.Timestamp time = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } message RequestProcessProposal { diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index 2fe5aa16aa..d3ccf4a28f 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -95,12 +95,12 @@ message Data { // field number 4 is reserved for blobs // SquareSize is the number of rows or columns in the original data square. - uint64 square_size = 5; + uint64 square_size = 5; // Hash is the root of a binary Merkle tree where the leaves of the tree are // the row and column roots of an extended data square. Hash is often referred // to as the "data root". - bytes hash = 6; + bytes hash = 6; } // Blob (named after binary large object) is a chunk of data submitted by a user @@ -108,9 +108,9 @@ message Data { // to a namespace and is encoded into shares based on the format specified by // share_version. message Blob { - bytes namespace_id = 1; - bytes data = 2; - uint32 share_version = 3; + bytes namespace_id = 1; + bytes data = 2; + uint32 share_version = 3; uint32 namespace_version = 4; } @@ -211,17 +211,17 @@ message BlobTx { // ShareProof is an NMT proof that a set of shares exist in a set of rows and a // Merkle proof that those rows exist in a Merkle tree with a given data root. message ShareProof { - repeated bytes data = 1; - repeated NMTProof share_proofs = 2; - bytes namespace_id = 3; - RowProof row_proof = 4; - uint32 namespace_version = 5; + repeated bytes data = 1; + repeated NMTProof share_proofs = 2; + bytes namespace_id = 3; + RowProof row_proof = 4; + uint32 namespace_version = 5; } // RowProof is a Merkle proof that a set of rows exist in a Merkle tree with a // given data root. 
message RowProof { - repeated bytes row_roots = 1; + repeated bytes row_roots = 1; repeated tendermint.crypto.Proof proofs = 2; bytes root = 3; uint32 start_row = 4; diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index d2198f53fe..e7b8cdadcf 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -261,12 +261,11 @@ func To32PaddedHexBytes(number uint64) ([]byte, error) { // The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. // For more information: https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol type DataRootTuple struct { - height uint64 - dataRoot [32]byte - squareSize uint64 + height uint64 + dataRoot [32]byte } -// EncodeDataRootTuple takes a height, a data root and the square size, and returns the equivalent of +// EncodeDataRootTuple takes a height and a data root, and returns the equivalent of // `abi.encode(...)` in Ethereum. // The encoded type is a DataRootTuple, which has the following ABI: // @@ -283,11 +282,6 @@ type DataRootTuple struct { // "type":"bytes32" // }, // { -// "internalType":"uint256", -// "name":"squareSize", -// "type":"uint256" -// }, -// { // "internalType":"structDataRootTuple", // "name":"_tuple", // "type":"tuple" @@ -295,21 +289,15 @@ type DataRootTuple struct { // ] // } // -// padding the hex representation of the height padded to 32 bytes concatenated to the data root concatenated -// to the hex representation of the square size padded to 32 bytes. +// padding the hex representation of the height padded to 32 bytes concatenated to the data root. 
// For more information, refer to: // https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol -func EncodeDataRootTuple(height uint64, dataRoot [32]byte, squareSize uint64) ([]byte, error) { +func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { paddedHeight, err := To32PaddedHexBytes(height) if err != nil { return nil, err } - dataSlice := dataRoot[:] - paddedSquareSize, err := To32PaddedHexBytes(squareSize) - if err != nil { - return nil, err - } - return append(paddedHeight, append(dataSlice, paddedSquareSize...)...), nil + return append(paddedHeight, dataRoot[:]...), nil } // validateDataCommitmentRange runs basic checks on the asc sorted list of @@ -330,9 +318,10 @@ func validateDataCommitmentRange(start uint64, end uint64) error { if start >= end { return fmt.Errorf("last block is smaller than first block") } - if end > uint64(env.BlockStore.Height()) { + // the data commitment range is end exclusive + if end > uint64(env.BlockStore.Height())+1 { return fmt.Errorf( - "last block %d is higher than current chain height %d", + "end block %d is higher than current chain height %d", end, env.BlockStore.Height(), ) @@ -348,7 +337,6 @@ func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { encodedTuple, err := EncodeDataRootTuple( tuple.height, tuple.dataRoot, - tuple.squareSize, ) if err != nil { return nil, err @@ -384,7 +372,6 @@ func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, e encodedTuple, err := EncodeDataRootTuple( tuple.height, tuple.dataRoot, - tuple.squareSize, ) if err != nil { return nil, err @@ -525,9 +512,8 @@ func fetchDataRootTuples(start, end uint64) ([]DataRootTuple, error) { return nil, fmt.Errorf("couldn't load block %d", height) } tuples = append(tuples, DataRootTuple{ - height: uint64(block.Height), - dataRoot: *(*[32]byte)(block.DataHash), - squareSize: block.SquareSize, + height: uint64(block.Height), + dataRoot: *(*[32]byte)(block.DataHash), }) 
} return tuples, nil diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 5745102951..31a37f0ef9 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -126,26 +126,23 @@ func TestBlockResults(t *testing.T) { func TestEncodeDataRootTuple(t *testing.T) { height := uint64(2) dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") - squareSize := uint64(64) require.NoError(t, err) expectedEncoding, err := hex.DecodeString( // hex representation of height padded to 32 bytes "0000000000000000000000000000000000000000000000000000000000000002" + // data root - "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013" + - // original square size - "0000000000000000000000000000000000000000000000000000000000000040", + "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", ) require.NoError(t, err) require.NotNil(t, expectedEncoding) - actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot), squareSize) + actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot)) require.NoError(t, err) require.NotNil(t, actualEncoding) // Check that the length of packed data is correct - assert.Equal(t, len(actualEncoding), 96) + assert.Equal(t, len(actualEncoding), 64) assert.Equal(t, expectedEncoding, actualEncoding) } @@ -171,6 +168,11 @@ func TestDataCommitmentResults(t *testing.T) { {0, 1000, false}, {0, 10, false}, {10, 8, false}, + // to test the end exclusive support for ranges. + // the end block could be equal to (height+1), but the data commitment would only + // take up to height. So we should be able to send request having end block equal + // to (height+1). 
+ {int(env.BlockStore.Height()) - 100, int(env.BlockStore.Height()) + 1, true}, } for i, tc := range testCases { @@ -191,7 +193,6 @@ func TestDataCommitmentResults(t *testing.T) { encodedTuple, err := EncodeDataRootTuple( uint64(blocks[tc.beginQuery+i].Height), *(*[32]byte)(blocks[tc.beginQuery+i].DataHash), - blocks[tc.beginQuery+i].SquareSize, ) require.NoError(t, err) dataRootEncodedTuples[i] = encodedTuple @@ -266,7 +267,6 @@ func TestDataRootInclusionProofResults(t *testing.T) { encodedTuple, err := EncodeDataRootTuple( uint64(blocks[tc.firstQuery+i].Height), *(*[32]byte)(blocks[tc.firstQuery+i].DataHash), - blocks[tc.firstQuery+i].SquareSize, ) require.NoError(t, err) dataRootEncodedTuples[i] = encodedTuple @@ -338,10 +338,10 @@ func (indexer mockBlockIndexer) Search(ctx context.Context, _ *query.Query) ([]i return results, nil } -// randomBlocks generates a set of random blocks up to the provided height. +// randomBlocks generates a set of random blocks up to (and including) the provided height. func randomBlocks(height int64) []*types.Block { - blocks := make([]*types.Block, height) - for i := int64(0); i < height; i++ { + blocks := make([]*types.Block, height+1) + for i := int64(0); i <= height; i++ { blocks[i] = randomBlock(i) } return blocks diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 39c6859c13..ed00f61dc3 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -5,6 +5,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" cmtnet "github.com/tendermint/tendermint/libs/net" ) @@ -26,8 +27,7 @@ func StartGRPCServer(ln net.Listener) error { // StartGRPCClient dials the gRPC server using protoAddr and returns a new // BroadcastAPIClient. func StartGRPCClient(protoAddr string) BroadcastAPIClient { - //nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option. 
- conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(protoAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc)) if err != nil { panic(err) } diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 9c928f1f04..76fa5063f9 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -139,6 +139,8 @@ var _ HTTPClient = (*Client)(nil) var _ Caller = (*Client)(nil) var _ Caller = (*RequestBatch)(nil) +var _ fmt.Stringer = (*Client)(nil) + // New returns a Client pointed at the given address. // An error is returned on invalid remote. The function panics when remote is nil. func New(remote string) (*Client, error) { @@ -232,6 +234,10 @@ func getHTTPRespErrPrefix(resp *http.Response) string { return fmt.Sprintf("error in json rpc client, with http response metadata: (Status: %s, Protocol %s)", resp.Status, resp.Proto) } +func (c *Client) String() string { + return fmt.Sprintf("&Client{user=%v, addr=%v, client=%v, nextReqID=%v}", c.username, c.address, c.client, c.nextReqID) +} + // NewRequestBatch starts a batch of requests for this client. 
func (c *Client) NewRequestBatch() *RequestBatch { return &RequestBatch{ diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 00b88e85f2..f12c6fe6fe 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -25,7 +25,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han fmt.Errorf("error reading request body: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, http.StatusBadRequest, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -48,7 +48,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if err := json.Unmarshal(b, &request); err != nil { res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -122,7 +122,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han wErr = WriteRPCResponseHTTP(w, responses...) 
} if wErr != nil { - logger.Error("failed to write responses", "res", responses, "err", wErr) + logger.Error("failed to write responses", "err", wErr) } } } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 6dd772e3d9..29eae9fc32 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -188,7 +188,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler // If RPCResponse if res, ok := e.(types.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } else { // Panics can contain anything, attempt to normalize it as an error. @@ -207,7 +207,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler res := types.RPCInternalError(types.JSONRPCIntID(-1), err) if wErr := WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 56f8274c95..ebed6eba43 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -27,7 +27,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit return func(w http.ResponseWriter, r *http.Request) { res := types.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, http.StatusNotFound, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } @@ -45,7 +45,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit fmt.Errorf("error converting http params to arguments: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, 
http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -58,7 +58,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit if err != nil { if err := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, types.RPCInternalError(dummyID, err)); err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } return @@ -71,7 +71,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit err = WriteRPCResponseHTTP(w, resp) } if err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 343b1f918c..068d006287 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -825,7 +825,7 @@ paths: required: true schema: type: string - example: "tx.height=1000" + example: '"tx.height=1000"' - in: query name: prove description: Include proofs of the transactions inclusion in the block @@ -896,7 +896,7 @@ paths: required: true schema: type: string - example: "block.height > 1000 AND valset.changed > 0" + example: '"block.height > 1000 AND valset.changed > 0"' - in: query name: page description: "Page number (1-based)" @@ -1108,7 +1108,7 @@ paths: required: true schema: type: string - example: "/a/b/c" + example: '"/a/b/c"' - in: query name: data description: Data diff --git a/state/execution.go b/state/execution.go index 1fcf43a982..fc5a929af9 100644 --- a/state/execution.go +++ b/state/execution.go @@ -115,10 +115,21 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // https://github.com/tendermint/tendermint/issues/77 txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + var timestamp time.Time + if height == 
state.InitialHeight { + timestamp = state.LastBlockTime // genesis time + } else { + timestamp = MedianTime(commit, state.LastValidators) + } + preparedProposal, err := blockExec.proxyApp.PrepareProposalSync( abci.RequestPrepareProposal{ BlockData: &cmtproto.Data{Txs: txs.ToSliceOfBytes()}, - BlockDataSize: maxDataBytes}, + BlockDataSize: maxDataBytes, + ChainId: state.ChainID, + Height: height, + Time: timestamp, + }, ) if err != nil { // The App MUST ensure that only valid (and hence 'processable') transactions @@ -237,7 +248,7 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, 0, err } if len(validatorUpdates) > 0 { - blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.logger.Info("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) } // Update the state with the block and responses. @@ -405,7 +416,7 @@ func execBlockOnProxyApp( return nil, err } - logger.Info("executed block", "height", block.Height, "num_valid_txs", validTxs, "num_invalid_txs", invalidTxs, "time", block.Time) + logger.Info("executed block", "height", block.Height, "num_valid_txs", validTxs, "num_invalid_txs", invalidTxs) return abciResponses, nil } diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index 30a71d05d8..a20e3c7afb 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "math/big" "sort" "strconv" "strings" @@ -311,9 +312,10 @@ LOOP: continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(eventValue, 10, 64) - if err != nil { + if _, ok := qr.AnyBound().(*big.Int); ok { + v := new(big.Int) + v, ok := v.SetString(eventValue, 10) + if !ok { // If the number was not int it might be a float but this behavior is kept the same as before the patch continue LOOP } @@ -385,15 +387,16 @@ func (idx *BlockerIndexer) setTmpHeights(tmpHeights map[string][]byte, it dbm.It } } 
-func checkBounds(ranges indexer.QueryRange, v int64) bool { +func checkBounds(ranges indexer.QueryRange, v *big.Int) bool { include := true lowerBound := ranges.LowerBoundValue() upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { + + if lowerBound != nil && v.Cmp(lowerBound.(*big.Int)) == -1 { include = false } - if upperBound != nil && v > upperBound.(int64) { + if upperBound != nil && v.Cmp(upperBound.(*big.Int)) == 1 { include = false } diff --git a/state/indexer/block/kv/kv_test.go b/state/indexer/block/kv/kv_test.go index 506088398a..e3caca6df2 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/state/indexer/block/kv/kv_test.go @@ -358,3 +358,132 @@ func TestBlockIndexerMulti(t *testing.T) { }) } } + +func TestBigInt(t *testing.T) { + + bigInt := "10000000000000000000" + store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + indexer := blockidxkv.New(store) + + require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ + Header: types.Header{Height: 1}, + ResultBeginBlock: abci.ResponseBeginBlock{ + Events: []abci.Event{}, + }, + ResultEndBlock: abci.ResponseEndBlock{ + Events: []abci.Event{ + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: []byte("foo"), + Value: []byte("100"), + Index: true, + }, + { + Key: []byte("bar"), + Value: []byte("10000000000000000000.76"), + Index: true, + }, + { + Key: []byte("bar_lower"), + Value: []byte("10000000000000000000.1"), + Index: true, + }, + }, + }, + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: []byte("foo"), + Value: []byte(bigInt), + Index: true, + }, + { + Key: []byte("bar"), + Value: []byte("500"), + Index: true, + }, + { + Key: []byte("bla"), + Value: []byte("500.5"), + Index: true, + }, + }, + }, + }, + }, + })) + + testCases := map[string]struct { + q *query.Query + results []int64 + }{ + + "query return all events from a height - exact": { + q: query.MustParse("block.height = 1"), + results: []int64{1}, + 
}, + "query return all events from a height - exact (deduplicate height)": { + q: query.MustParse("block.height = 1 AND block.height = 2"), + results: []int64{1}, + }, + "query return all events from a height - range": { + q: query.MustParse("block.height < 2 AND block.height > 0 AND block.height > 0"), + results: []int64{1}, + }, + "query matches fields with big int and height - no match": { + q: query.MustParse("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int with less and height - no match": { + q: query.MustParse("end_event.foo <= " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int and height - match": { + q: query.MustParse("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 1"), + results: []int64{1}, + }, + "query matches big int in range": { + q: query.MustParse("end_event.foo = " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float - does not pass as float is not converted to int": { + q: query.MustParse("end_event.bar >= " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float - fails because float is converted to int": { + q: query.MustParse("end_event.bar > " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float lower dec point - fails because float is converted to int": { + q: query.MustParse("end_event.bar_lower > " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float with less - found": { + q: query.MustParse("end_event.foo <= " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float with less with height range - found": { + q: query.MustParse("end_event.foo <= " + bigInt + " AND block.height > 0"), + results: []int64{1}, + }, + "query matches big int in range with float with less - not found": { + q: 
query.MustParse("end_event.foo < " + bigInt + " AND end_event.foo > 100"), + results: []int64{}, + }, + "query does not parse float": { + q: query.MustParse("end_event.bla >= 500"), + results: []int64{}, + }, + } + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + results, err := indexer.Search(context.Background(), tc.q) + require.NoError(t, err) + require.Equal(t, tc.results, results) + }) + } +} diff --git a/state/indexer/block/kv/util.go b/state/indexer/block/kv/util.go index 8c71afb53d..6b44c8cf67 100644 --- a/state/indexer/block/kv/util.go +++ b/state/indexer/block/kv/util.go @@ -3,6 +3,7 @@ package kv import ( "encoding/binary" "fmt" + "math/big" "strconv" "github.com/google/orderedcode" @@ -135,7 +136,7 @@ func parseEventSeqFromEventKey(key []byte) (int64, error) { func lookForHeight(conditions []query.Condition) (int64, bool, int) { for i, c := range conditions { if c.CompositeKey == types.BlockHeightKey && c.Op == query.OpEqual { - return c.Operand.(int64), true, i + return c.Operand.(*big.Int).Int64(), true, i } } @@ -159,7 +160,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio continue } else { heightCondition = append(heightCondition, c) - heightInfo.height = c.Operand.(int64) + heightInfo.height = c.Operand.(*big.Int).Int64() // As height is assumed to always be int64 found = true } } else { @@ -196,7 +197,7 @@ func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) { for i, c := range conditions { if c.CompositeKey == types.MatchEventKey { // Match events should be added only via RPC as the very first query condition - if i == 0 && c.Op == query.OpEqual && c.Operand.(int64) == 1 { + if i == 0 && c.Op == query.OpEqual && c.Operand.(*big.Int).Int64() == 1 { dedupConditions = append(dedupConditions, c) matchEvents = true } @@ -210,7 +211,7 @@ func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) { func checkHeightConditions(heightInfo 
HeightInfo, keyHeight int64) bool { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { + if !checkBounds(heightInfo.heightRange, big.NewInt(keyHeight)) { return false } } else { diff --git a/state/indexer/query_range.go b/state/indexer/query_range.go index 27fab657ef..20ac70cd8d 100644 --- a/state/indexer/query_range.go +++ b/state/indexer/query_range.go @@ -1,6 +1,7 @@ package indexer import ( + "math/big" "time" "github.com/tendermint/tendermint/libs/pubsub/query" @@ -44,6 +45,9 @@ func (qr QueryRange) LowerBoundValue() interface{} { switch t := qr.LowerBound.(type) { case int64: return t + 1 + case *big.Int: + tmp := new(big.Int) + return tmp.Add(t, big.NewInt(1)) case time.Time: return t.Unix() + 1 @@ -67,7 +71,9 @@ func (qr QueryRange) UpperBoundValue() interface{} { switch t := qr.UpperBound.(type) { case int64: return t - 1 - + case *big.Int: + tmp := new(big.Int) + return tmp.Sub(t, big.NewInt(1)) case time.Time: return t.Unix() - 1 diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index b6979e2d52..8199dd054a 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -5,6 +5,7 @@ import ( "context" "encoding/hex" "fmt" + "math/big" "strconv" "strings" @@ -371,7 +372,7 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) func lookForHeight(conditions []query.Condition) (height int64, heightIdx int) { for i, c := range conditions { if c.CompositeKey == types.TxHeightKey && c.Op == query.OpEqual { - return c.Operand.(int64), i + return c.Operand.(*big.Int).Int64(), i } } return 0, -1 @@ -573,9 +574,11 @@ LOOP: continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - if err != nil { + if _, ok := qr.AnyBound().(*big.Int); ok { + v := new(big.Int) + eventValue := extractValueFromKey(it.Key()) + v, ok := v.SetString(eventValue, 10) + if !ok { continue LOOP } @@ -715,15 +718,15 @@ func startKey(fields 
...interface{}) []byte { return b.Bytes() } -func checkBounds(ranges indexer.QueryRange, v int64) bool { +func checkBounds(ranges indexer.QueryRange, v *big.Int) bool { include := true lowerBound := ranges.LowerBoundValue() upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { + if lowerBound != nil && v.Cmp(lowerBound.(*big.Int)) == -1 { include = false } - if upperBound != nil && v > upperBound.(int64) { + if upperBound != nil && v.Cmp(upperBound.(*big.Int)) == 1 { include = false } diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 40cd6a1b5b..e2e4eeecdb 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -19,6 +19,77 @@ import ( "github.com/tendermint/tendermint/types" ) +func TestBigInt(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + + bigInt := "10000000000000000000" + bigIntPlus1 := "10000000000000000001" + bigFloat := bigInt + ".76" + bigFloatLower := bigInt + ".1" + + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigInt), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigIntPlus1), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigFloatLower), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("owner"), Value: []byte("/Ivan/"), Index: true}}}, + {Type: "", Attributes: []abci.EventAttribute{{Key: []byte("not_allowed"), Value: []byte("Vlad"), Index: true}}}, + }) + hash := types.Tx(txResult.Tx).Hash() + + err := indexer.Index(txResult) + + require.NoError(t, err) + + txResult2 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte(bigFloat), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: 
[]byte(bigFloat), Index: true}, {Key: []byte("amount"), Value: []byte("5"), Index: true}}}, + }) + + txResult2.Tx = types.Tx("NEW TX") + txResult2.Height = 2 + txResult2.Index = 2 + + hash2 := types.Tx(txResult2.Tx).Hash() + + err = indexer.Index(txResult2) + require.NoError(t, err) + testCases := []struct { + q string + txRes *abci.TxResult + resultsLength int + }{ + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash), txResult, 1}, + // search by hash (lower) + {fmt.Sprintf("tx.hash = '%x'", hash), txResult, 1}, + {fmt.Sprintf("tx.hash = '%x'", hash2), txResult2, 1}, + // search by exact match (one key) - bigint + {"match.events = 1 AND account.number >= " + bigInt, nil, 1}, + // search by exact match (one key) - bigint range + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0", nil, 1}, + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0 AND account.owner = '/Ivan/'", nil, 0}, + // Floats are not parsed + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0 AND account.amount > 4", txResult2, 0}, + {"match.events = 1 AND account.number >= " + bigInt + " AND tx.height > 0 AND account.amount = 5", txResult2, 0}, + {"match.events = 1 AND account.number >= " + bigInt + " AND account.amount <= 5", txResult2, 0}, + {"match.events = 1 AND account.number < " + bigInt + " AND tx.height = 1", nil, 0}, + } + + ctx := context.Background() + + for _, tc := range testCases { + tc := tc + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(ctx, query.MustParse(tc.q)) + assert.NoError(t, err) + assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 && tc.txRes != nil { + assert.True(t, proto.Equal(results[0], tc.txRes)) + } + }) + } +} + func TestTxIndex(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) diff --git a/state/txindex/kv/utils.go b/state/txindex/kv/utils.go index a7ca5c826e..3dfa49b76a 100644 --- a/state/txindex/kv/utils.go +++ b/state/txindex/kv/utils.go @@ -2,6 
+2,7 @@ package kv import ( "fmt" + "math/big" "github.com/google/orderedcode" "github.com/tendermint/tendermint/libs/pubsub/query" @@ -33,7 +34,7 @@ func dedupMatchEvents(conditions []query.Condition) ([]query.Condition, bool) { for i, c := range conditions { if c.CompositeKey == types.MatchEventKey { // Match events should be added only via RPC as the very first query condition - if i == 0 && c.Op == query.OpEqual && c.Operand.(int64) == 1 { + if i == 0 && c.Op == query.OpEqual && c.Operand.(*big.Int).Int64() == 1 { dedupConditions = append(dedupConditions, c) matchEvents = true } @@ -79,7 +80,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio } else { found = true heightCondition = append(heightCondition, c) - heightInfo.height = c.Operand.(int64) + heightInfo.height = c.Operand.(*big.Int).Int64() //Height is always int64 } } else { heightInfo.onlyHeightEq = false @@ -110,7 +111,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) bool { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { + if !checkBounds(heightInfo.heightRange, big.NewInt(keyHeight)) { return false } } else { diff --git a/test/e2e/pkg/infrastructure.go b/test/e2e/pkg/infrastructure.go index a9e1abfa5d..a1bec6fbf2 100644 --- a/test/e2e/pkg/infrastructure.go +++ b/test/e2e/pkg/infrastructure.go @@ -40,6 +40,16 @@ type InfrastructureData struct { // InfluxDBToken is the token to use when writing to the InfluxDB instance. // Must be specified if 'influxdb-url' is specified. InfluxDBToken string `json:"influxdb_token,omitempty"` + + // PyroscopeURL is the URL of the pyroscope instance to use for continuous + // profiling. If not specified, data will not be collected. + PyroscopeURL string `json:"pyroscope_url,omitempty"` + + // PyroscopeTrace enables adding trace data to pyroscope profiling. 
+ PyroscopeTrace bool `json:"pyroscope_trace,omitempty"` + + // PyroscopeProfileTypes is the list of profile types to collect. + PyroscopeProfileTypes []string `json:"pyroscope_profile_types,omitempty"` } // InstanceData contains the relevant information for a machine instance backing diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index de98494deb..2cb9a58f4e 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -81,33 +81,36 @@ type Testnet struct { // Node represents a CometBFT node in a testnet. type Node struct { - Name string - Version string - Testnet *Testnet - Mode Mode - PrivvalKey crypto.PrivKey - NodeKey crypto.PrivKey - IP net.IP - ProxyPort uint32 - StartAt int64 - FastSync string - StateSync bool - Mempool string - Database string - ABCIProtocol Protocol - PrivvalProtocol Protocol - PersistInterval uint64 - SnapshotInterval uint64 - RetainBlocks uint64 - Seeds []*Node - PersistentPeers []*Node - Perturbations []Perturbation - Misbehaviors map[int64]string - SendNoLoad bool - Prometheus bool - PrometheusProxyPort uint32 - InfluxDBURL string - InfluxDBToken string + Name string + Version string + Testnet *Testnet + Mode Mode + PrivvalKey crypto.PrivKey + NodeKey crypto.PrivKey + IP net.IP + ProxyPort uint32 + StartAt int64 + FastSync string + StateSync bool + Mempool string + Database string + ABCIProtocol Protocol + PrivvalProtocol Protocol + PersistInterval uint64 + SnapshotInterval uint64 + RetainBlocks uint64 + Seeds []*Node + PersistentPeers []*Node + Perturbations []Perturbation + Misbehaviors map[int64]string + SendNoLoad bool + Prometheus bool + PrometheusProxyPort uint32 + InfluxDBURL string + InfluxDBToken string + PyroscopeURL string + PyroscopeTrace bool + PyroscopeProfileTypes []string } // LoadTestnet loads a testnet from a manifest file, using the filename to @@ -185,30 +188,33 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test } node := &Node{ - Name: name, - Version: v, - 
Testnet: testnet, - PrivvalKey: keyGen.Generate(manifest.KeyType), - NodeKey: keyGen.Generate("ed25519"), - IP: ind.IPAddress, - ProxyPort: proxyPortGen.Next(), - Mode: ModeValidator, - Database: "goleveldb", - ABCIProtocol: Protocol(testnet.ABCIProtocol), - PrivvalProtocol: ProtocolFile, - StartAt: nodeManifest.StartAt, - FastSync: nodeManifest.FastSync, - Mempool: nodeManifest.Mempool, - StateSync: nodeManifest.StateSync, - PersistInterval: 1, - SnapshotInterval: nodeManifest.SnapshotInterval, - RetainBlocks: nodeManifest.RetainBlocks, - Perturbations: []Perturbation{}, - Misbehaviors: make(map[int64]string), - SendNoLoad: nodeManifest.SendNoLoad, - InfluxDBURL: ifd.InfluxDBURL, - InfluxDBToken: ifd.InfluxDBToken, - Prometheus: testnet.Prometheus, + Name: name, + Version: v, + Testnet: testnet, + PrivvalKey: keyGen.Generate(manifest.KeyType), + NodeKey: keyGen.Generate("ed25519"), + IP: ind.IPAddress, + ProxyPort: proxyPortGen.Next(), + Mode: ModeValidator, + Database: "goleveldb", + ABCIProtocol: Protocol(testnet.ABCIProtocol), + PrivvalProtocol: ProtocolFile, + StartAt: nodeManifest.StartAt, + FastSync: nodeManifest.FastSync, + Mempool: nodeManifest.Mempool, + StateSync: nodeManifest.StateSync, + PersistInterval: 1, + SnapshotInterval: nodeManifest.SnapshotInterval, + RetainBlocks: nodeManifest.RetainBlocks, + Perturbations: []Perturbation{}, + Misbehaviors: make(map[int64]string), + SendNoLoad: nodeManifest.SendNoLoad, + InfluxDBURL: ifd.InfluxDBURL, + InfluxDBToken: ifd.InfluxDBToken, + PyroscopeURL: ifd.PyroscopeURL, + PyroscopeTrace: ifd.PyroscopeTrace, + PyroscopeProfileTypes: ifd.PyroscopeProfileTypes, + Prometheus: testnet.Prometheus, } if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 6e2f922577..425c2fa4a0 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -91,6 +91,19 @@ func NewCLI() *CLI { 
ifd.InfluxDBToken = itoken } + purl, err := cmd.Flags().GetString(trace.FlagPyroscopeURL) + if err != nil { + return err + } + pTrace, err := cmd.Flags().GetBool(trace.FlagPyroscopeTrace) + if err != nil { + return err + } + if ifd.PyroscopeURL == "" { + ifd.PyroscopeURL = purl + ifd.PyroscopeTrace = pTrace + } + testnet, err := e2e.LoadTestnet(m, file, ifd) if err != nil { return fmt.Errorf("loading testnet: %s", err) @@ -177,6 +190,10 @@ func NewCLI() *CLI { cli.root.PersistentFlags().String(trace.FlagInfluxDBToken, "", trace.FlagInfluxDBTokenDescription) + cli.root.PersistentFlags().String(trace.FlagPyroscopeURL, "", trace.FlagPyroscopeURLDescription) + + cli.root.PersistentFlags().Bool(trace.FlagPyroscopeTrace, false, trace.FlagPyroscopeTraceDescription) + cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, "Preserves the running of the test net after tests are completed") diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 5a49d3b1d2..8fdecf6a77 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -170,6 +170,9 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Instrumentation.InfluxBucket = "e2e" cfg.Instrumentation.InfluxURL = node.InfluxDBURL cfg.Instrumentation.InfluxToken = node.InfluxDBToken + cfg.Instrumentation.PyroscopeTrace = node.PyroscopeTrace + cfg.Instrumentation.PyroscopeURL = node.PyroscopeURL + cfg.Instrumentation.PyroscopeProfileTypes = node.PyroscopeProfileTypes switch node.ABCIProtocol { case e2e.ProtocolUNIX: diff --git a/test/maverick/consensus/state.go b/test/maverick/consensus/state.go index ae9ebeadee..f73e4a14c6 100644 --- a/test/maverick/consensus/state.go +++ b/test/maverick/consensus/state.go @@ -900,9 +900,9 @@ func (cs *State) updateToState(state sm.State) { // to be gathered for the first block. 
// And alternative solution that relies on clocks: // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) - cs.StartTime = state.LastBlockTime + cs.StartTime = cs.config.Commit(cmttime.Now()) } else { - cs.StartTime = cs.config.NextStartTime(cs.StartTime) + cs.StartTime = cs.config.Commit(cs.CommitTime) } cs.Validators = validators diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go index 5cb69684dd..47ea7a98d3 100644 --- a/test/maverick/node/node.go +++ b/test/maverick/node/node.go @@ -37,6 +37,7 @@ import ( mempoolv1 "github.com/tendermint/tendermint/mempool/v1" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/pex" + "github.com/tendermint/tendermint/pkg/trace" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" rpccore "github.com/tendermint/tendermint/rpc/core" @@ -425,6 +426,7 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, reactor := mempoolv1.NewReactor( config.Mempool, mp, + &trace.Client{}, ) if config.Consensus.WaitForTxs() { mp.EnableTxsAvailable() diff --git a/types/block.go b/types/block.go index 858969852f..662478b7c0 100644 --- a/types/block.go +++ b/types/block.go @@ -1054,6 +1054,11 @@ type Blob struct { ShareVersion uint8 } +// Namespace returns the namespace of this blob encoded as a byte slice. +func (b Blob) Namespace() []byte { + return append([]byte{b.NamespaceVersion}, b.NamespaceID...) +} + // StringIndented returns an indented string representation of the transactions. 
func (data *Data) StringIndented(indent string) string { if data == nil { diff --git a/types/block_test.go b/types/block_test.go index b20986137b..0190e47628 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -856,3 +856,27 @@ func TestBlockIDEquals(t *testing.T) { assert.True(t, blockIDEmpty.Equals(blockIDEmpty)) assert.False(t, blockIDEmpty.Equals(blockIDDifferent)) } + +func TestBlob(t *testing.T) { + namespaceVersion := uint8(0) + namespaceID := stdbytes.Repeat([]byte{0x01}, 28) + data := []byte("data") + shareVersion := uint8(0) + + blob := Blob{ + NamespaceVersion: namespaceVersion, + NamespaceID: namespaceID, + Data: data, + ShareVersion: shareVersion, + } + + t.Run("blob.Namespace() returns encoded namespace", func(t *testing.T) { + got := blob.Namespace() + want := []byte{ + 0, // namespace version + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // namespace ID + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // namespace ID + } + assert.Equal(t, want, got) + }) +} diff --git a/version/version.go b/version/version.go index 36c9901711..ea80cbcf36 100644 --- a/version/version.go +++ b/version/version.go @@ -3,7 +3,7 @@ package version const ( // TMCoreSemVer is the used as the fallback version of CometBFT Core // when not using git describe. It is formatted with semantic versioning. - TMCoreSemVer = "0.34.28" + TMCoreSemVer = "0.34.29" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.17.0"