diff --git a/.github/auto_request_review.yml b/.github/auto_request_review.yml index fcaed3c592..a00957c0e3 100644 --- a/.github/auto_request_review.yml +++ b/.github/auto_request_review.yml @@ -12,7 +12,3 @@ options: ignored_keywords: - DO NOT REVIEW enable_group_assignment: false - - # Randomly pick reviewers up to this number. - # Do not set this option if you'd like to assign all matching reviewers. - number_of_reviewers: 2 diff --git a/.github/mergify.yml b/.github/mergify.yml index 1e60d88bb2..f32c3fafcf 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -15,3 +15,19 @@ pull_request_rules: backport: branches: - v2.x + - name: forward-port patches to main branch (v1.x) + conditions: + - base=v1.x + - label=forwardport:main + actions: + backport: + branches: + - main + - name: forward-port patches to main branch (v2.x) + conditions: + - base=v2.x + - label=forwardport:main + actions: + backport: + branches: + - main diff --git a/.github/workflows/ci-release.yml b/.github/workflows/ci-release.yml index f4bbe29713..5fe828d636 100644 --- a/.github/workflows/ci-release.yml +++ b/.github/workflows/ci-release.yml @@ -6,12 +6,9 @@ on: push: branches: - main - - "v[0-9]+.x" + - "v*" tags: - - "v[0-9]+.[0-9]+.[0-9]+" - - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" - - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" - - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" + - "v*" pull_request: jobs: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 84f229e2e6..6ef0382d21 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -13,9 +13,9 @@ name: "CodeQL" on: push: - branches: ["main", "v[0-9].[0-9].x", "v[0-9].[0-9][0-9].x", "v[0-9].x"] + branches: ["main", "v*"] schedule: - - cron: '24 20 * * 4' + - cron: "24 20 * * 4" jobs: analyze: @@ -25,7 +25,7 @@ jobs: # - https://gh.io/supported-runners-and-hardware-resources # - https://gh.io/using-larger-runners # Consider using larger runners for possible analysis time improvements. - runs-on: 'ubuntu-latest' + runs-on: "ubuntu-latest" timeout-minutes: 360 permissions: actions: read @@ -35,7 +35,7 @@ jobs: strategy: fail-fast: false matrix: - language: ['go'] + language: ["go"] # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support steps: @@ -56,7 +56,7 @@ jobs: - uses: actions/setup-go@v5 with: - go-version-file: 'go.mod' + go-version-file: "go.mod" - name: Build binary run: | diff --git a/.github/workflows/create_release_tracking_epic.yml b/.github/workflows/create_release_tracking_epic.yml deleted file mode 100644 index 2fa90be14a..0000000000 --- a/.github/workflows/create_release_tracking_epic.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Create Release Tracking Epic - -# This workflow creates an EPIC in the devops repo and notifies the devops team -# on slack for tracking the deployment of a release to testnets and mainnet. 
-on: - release: - types: [released] -jobs: - trigger_issue: - uses: celestiaorg/.github/.github/workflows/reusable_create_release_tracking_epic.yml@v0.4.3 - secrets: inherit - with: - release-repo: ${{ github.repository }} - release-version: ${{ github.event.release.tag_name }} diff --git a/.github/workflows/docker-build-publish.yml b/.github/workflows/docker-build-publish.yml index f805f343dd..fff08019ec 100644 --- a/.github/workflows/docker-build-publish.yml +++ b/.github/workflows/docker-build-publish.yml @@ -5,32 +5,35 @@ on: push: branches: - "main" - - "v[0-9].[0-9].x" - - "v[0-9].[0-9][0-9].x" - - "v[0-9].x" + - "v*" tags: - - "v[0-9]+.[0-9]+.[0-9]+" - - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" - - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" - - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" + - "v*" pull_request: + workflow_dispatch: + inputs: + ref: + description: "The checkout reference (ie tag, branch, sha)" + required: true + type: string jobs: docker-security-build: permissions: contents: write packages: write - uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.4.3 + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.4.6 with: - dockerfile: Dockerfile + dockerfile: docker/Dockerfile + checkout_ref: ${{ github.event.inputs.ref }} secrets: inherit docker-txsim-build: permissions: contents: write packages: write - uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.4.3 + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_pipeline.yml@v0.4.6 with: - dockerfile: docker/Dockerfile_txsim + dockerfile: docker/txsim/Dockerfile packageName: txsim + checkout_ref: ${{ github.event.inputs.ref }} secrets: inherit diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4716adabf1..c41d9dd8d6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -20,20 +20,21 @@ jobs: **/**.go go.mod go.sum - - uses: golangci/golangci-lint-action@v6.1.0 + - uses: golangci/golangci-lint-action@v6.1.1 with: - version: v1.59.1 + version: v1.61.0 args: --timeout 10m github-token: ${{ secrets.github_token }} - skip-pkg-cache: true if: env.GIT_DIFF # hadolint lints the Dockerfile hadolint: - uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_lint.yml@v0.4.3 + uses: celestiaorg/.github/.github/workflows/reusable_dockerfile_lint.yml@v0.4.6 + with: + dockerfile: "docker/Dockerfile" yamllint: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: celestiaorg/.github/.github/actions/yamllint@v0.4.3 + - uses: celestiaorg/.github/.github/actions/yamllint@v0.4.6 diff --git a/.github/workflows/pr-review-requester.yml b/.github/workflows/pr-review-requester.yml index a46df08794..0e62d00940 100644 --- a/.github/workflows/pr-review-requester.yml +++ b/.github/workflows/pr-review-requester.yml @@ -11,7 +11,7 @@ on: jobs: auto-request-review: name: Auto request reviews - uses: celestiaorg/.github/.github/workflows/reusable_housekeeping.yml@v0.4.3 + uses: celestiaorg/.github/.github/workflows/reusable_housekeeping.yml@v0.4.6 secrets: inherit # write access for issues and pull requests is needed because the called # workflow requires write access to issues and pull requests and the diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 21185f78c8..b135a91611 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -41,7 +41,7 @@ jobs: run: make test-coverage - name: Upload coverage.txt - uses: codecov/codecov-action@v4.5.0 + uses: 
codecov/codecov-action@v4.6.0 with: file: ./coverage.txt diff --git a/.golangci.yml b/.golangci.yml index d14fe1963a..59be9b021d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,7 +4,7 @@ run: linters: enable: - - exportloopref + - copyloopvar - gofumpt - misspell - nakedret diff --git a/.goreleaser.yaml b/.goreleaser.yaml index f36e8e0435..4f08a63b64 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -108,7 +108,7 @@ archives: checksum: name_template: "checksums.txt" snapshot: - name_template: "{{ incpatch .Version }}-next" + version_template: "{{ incpatch .Version }}-next" changelog: sort: asc filters: @@ -118,4 +118,4 @@ changelog: release: prerelease: auto git: - prerelease_suffix: "-rc" + prerelease_suffix: "-" diff --git a/Makefile b/Makefile index d291c6d2b3..e095b08bb5 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,20 @@ -VERSION := $(shell echo $(shell git describe --tags 2>/dev/null || git log -1 --format='%h') | sed 's/^v//') +# GIT_TAG is an environment variable that is set to the latest git tag on the +# current commit with the following example priority: v2.2.0, v2.2.0-mocha, +# v2.2.0-arabica, v2.2.0-rc0, v2.2.0-beta, v2.2.0-alpha. If no tag points to the +# current commit, git describe is used. The priority in this command is +# necessary because `git tag --sort=-creatordate` only works for annotated tags +# with metadata. Git tags created via GitHub releases are not annotated and do +# not have metadata like creatordate. Therefore, this command is a hacky attempt +# to get the most recent tag on the current commit according to Celestia's +# testnet versioning scheme + SemVer. +GIT_TAG := $(shell git tag --points-at HEAD --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$$' \ + || git tag --points-at HEAD --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-mocha$$' \ + || git tag --points-at HEAD --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-arabica$$' \ + || git tag --points-at HEAD --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]*$$' \ + || git tag --points-at HEAD --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-(beta)$$' \ + || git tag --points-at HEAD --sort=-v:refname | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-(alpha)$$' \ + || git describe --tags) +VERSION := $(shell echo $(GIT_TAG) | sed 's/^v//') COMMIT := $(shell git rev-parse --short HEAD) DOCKER := $(shell which docker) DOCKER_BUF := $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace bufbuild/buf @@ -7,13 +23,21 @@ DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspa PROJECTNAME=$(shell basename "$(PWD)") HTTPS_GIT := https://github.com/celestiaorg/celestia-app.git PACKAGE_NAME := github.com/celestiaorg/celestia-app/v3 -GOLANG_CROSS_VERSION ?= v1.22.6 +# Before upgrading the GOLANG_CROSS_VERSION, please verify that a Docker image exists with the new tag. 
+# See https://github.com/goreleaser/goreleaser-cross/pkgs/container/goreleaser-cross +GOLANG_CROSS_VERSION ?= v1.23.1 +# Set this to override the max square size of the binary +OVERRIDE_MAX_SQUARE_SIZE ?= +# Set this to override the upgrade height delay of the binary +OVERRIDE_UPGRADE_HEIGHT_DELAY ?= # process linker flags ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=celestia-app \ -X github.com/cosmos/cosmos-sdk/version.AppName=celestia-appd \ -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \ -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \ + -X github.com/celestiaorg/celestia-app/v3/pkg/appconsts.OverrideSquareSizeUpperBoundStr=$(OVERRIDE_MAX_SQUARE_SIZE) \ + -X github.com/celestiaorg/celestia-app/v3/pkg/appconsts.OverrideUpgradeHeightDelayStr=$(OVERRIDE_UPGRADE_HEIGHT_DELAY) BUILD_FLAGS := -tags "ledger" -ldflags '$(ldflags)' @@ -31,7 +55,7 @@ build: mod .PHONY: build ## install: Build and install the celestia-appd binary into the $GOPATH/bin directory. -install: go.sum check-bbr +install: check-bbr @echo "--> Installing celestia-appd" @go install $(BUILD_FLAGS) ./cmd/celestia-appd .PHONY: install @@ -77,13 +101,13 @@ proto-format: ## build-docker: Build the celestia-appd docker image from the current branch. Requires docker. build-docker: @echo "--> Building Docker image" - $(DOCKER) build -t celestiaorg/celestia-app -f Dockerfile . + $(DOCKER) build -t celestiaorg/celestia-app -f docker/Dockerfile . .PHONY: build-docker ## build-ghcr-docker: Build the celestia-appd docker image from the last commit. Requires docker. build-ghcr-docker: @echo "--> Building Docker image" - $(DOCKER) build -t ghcr.io/celestiaorg/celestia-app:$(COMMIT) -f Dockerfile . + $(DOCKER) build -t ghcr.io/celestiaorg/celestia-app:$(COMMIT) -f docker/Dockerfile . .PHONY: build-ghcr-docker ## publish-ghcr-docker: Publish the celestia-appd docker image. Requires docker. @@ -100,7 +124,8 @@ lint: @echo "--> Running markdownlint" @markdownlint --config .markdownlint.yaml '**/*.md' @echo "--> Running hadolint" - @hadolint Dockerfile + @hadolint docker/Dockerfile + @hadolint docker/txsim/Dockerfile @echo "--> Running yamllint" @yamllint --no-warnings . -c .yamllint.yml .PHONY: lint @@ -143,7 +168,7 @@ test-race: # TODO: Remove the -skip flag once the following tests no longer contain data races. # https://github.com/celestiaorg/celestia-app/issues/1369 @echo "--> Running tests in race mode" - @go test ./... -v -race -skip "TestPrepareProposalConsistency|TestIntegrationTestSuite|TestBlobstreamRPCQueries|TestSquareSizeIntegrationTest|TestStandardSDKIntegrationTestSuite|TestTxsimCommandFlags|TestTxsimCommandEnvVar|TestMintIntegrationTestSuite|TestBlobstreamCLI|TestUpgrade|TestMaliciousTestNode|TestBigBlobSuite|TestQGBIntegrationSuite|TestSignerTestSuite|TestPriorityTestSuite|TestTimeInPrepareProposalContext|TestBlobstream|TestCLITestSuite|TestLegacyUpgrade|TestSignerTwins|TestConcurrentTxSubmission|TestTxClientTestSuite|Test_testnode" + @go test -timeout 15m ./... 
-v -race -skip "TestPrepareProposalConsistency|TestIntegrationTestSuite|TestBlobstreamRPCQueries|TestSquareSizeIntegrationTest|TestStandardSDKIntegrationTestSuite|TestTxsimCommandFlags|TestTxsimCommandEnvVar|TestMintIntegrationTestSuite|TestBlobstreamCLI|TestUpgrade|TestMaliciousTestNode|TestBigBlobSuite|TestQGBIntegrationSuite|TestSignerTestSuite|TestPriorityTestSuite|TestTimeInPrepareProposalContext|TestBlobstream|TestCLITestSuite|TestLegacyUpgrade|TestSignerTwins|TestConcurrentTxSubmission|TestTxClientTestSuite|Test_testnode|TestEvictions" .PHONY: test-race ## test-bench: Run unit tests in bench mode. @@ -180,7 +205,7 @@ txsim-build: ## txsim-build-docker: Build the tx simulator Docker image. Requires Docker. txsim-build-docker: - docker build -t ghcr.io/celestiaorg/txsim -f docker/Dockerfile_txsim . + docker build -t ghcr.io/celestiaorg/txsim -f docker/txsim/Dockerfile . .PHONY: txsim-build-docker ## adr-gen: Download the ADR template from the celestiaorg/.github repo. @@ -246,4 +271,10 @@ enable-bbr: else \ echo "BBR is already enabled."; \ fi -.PHONY: enable-bbr \ No newline at end of file +.PHONY: enable-bbr + +## debug-version: Print the git tag and version. +debug-version: + @echo "GIT_TAG: $(GIT_TAG)" + @echo "VERSION: $(VERSION)" +.PHONY: debug-version diff --git a/README.md b/README.md index f1f94636c7..44906140b4 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ node | | | | ### Source -1. [Install Go](https://go.dev/doc/install) 1.22.6 +1. [Install Go](https://go.dev/doc/install) 1.23.1 1. Clone this repo 1. Install the celestia-app CLI @@ -84,7 +84,7 @@ system's kernel. The result should contain `bbr`: sysctl net.ipv4.tcp_congestion_control ``` -If not, enable it on Linux by calling the `make use-bbr` or by running: +If not, enable it on Linux by calling the `make enable-bbr` or by running: ```sh sudo modprobe tcp_bbr @@ -134,7 +134,7 @@ This repo contains multiple go modules. When using it, rename `go.work.example` ### Tools -1. Install [golangci-lint](https://golangci-lint.run/welcome/install) 1.59.1 +1. Install [golangci-lint](https://golangci-lint.run/welcome/install) 1.61.0 1. Install [markdownlint](https://github.com/DavidAnson/markdownlint) 0.39.0 1. Install [hadolint](https://github.com/hadolint/hadolint) 1. 
Install [yamllint](https://yamllint.readthedocs.io/en/stable/quickstart.html) @@ -169,8 +169,9 @@ Package-specific READMEs aim to explain implementation details for developers th ## Audits -| Date | Auditor | Version | Report | -|------------|-----------------------------------------------|-------------------------------------------------------------------------------------|---------------------------------------------------------------| -| 2023/9/15 | [Informal Systems](https://informal.systems/) | [v1.0.0-rc6](https://github.com/celestiaorg/celestia-app/releases/tag/v1.0.0-rc6) | [informal-systems.pdf](docs/audit/informal-systems.pdf) | -| 2023/10/17 | [Binary Builders](https://binary.builders/) | [v1.0.0-rc10](https://github.com/celestiaorg/celestia-app/releases/tag/v1.0.0-rc10) | [binary-builders.pdf](docs/audit/binary-builders.pdf) | -| 2024/7/1 | [Informal Systems](https://informal.systems/) | [v2.0.0-rc1](https://github.com/celestiaorg/celestia-app/releases/tag/v2.0.0-rc1) | [informal-systems-v2.pdf](docs/audit/informal-systems-v2.pdf) | +| Date | Auditor | Version | Report | +|------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------| +| 2023/9/15 | [Informal Systems](https://informal.systems/) | [v1.0.0-rc6](https://github.com/celestiaorg/celestia-app/releases/tag/v1.0.0-rc6) | [informal-systems.pdf](docs/audit/informal-systems.pdf) | +| 2023/10/17 | [Binary Builders](https://binary.builders/) | [v1.0.0-rc10](https://github.com/celestiaorg/celestia-app/releases/tag/v1.0.0-rc10) | [binary-builders.pdf](docs/audit/binary-builders.pdf) | +| 2024/7/1 | [Informal Systems](https://informal.systems/) | [v2.0.0-rc1](https://github.com/celestiaorg/celestia-app/releases/tag/v2.0.0-rc1) | [informal-systems-v2.pdf](docs/audit/informal-systems-v2.pdf) | +| 2024/9/20 | [Informal Systems](https://informal.systems/) | [306c587](https://github.com/celestiaorg/celestia-app/commit/306c58745d135d31c3777a1af2f58d50adbd32c8) | [informal-systems-authored-blobs.pdf](docs/audit/informal-systems-authored-blobs.pdf) | diff --git a/app/ante/ante.go b/app/ante/ante.go index dd65fdd120..c82313b65e 100644 --- a/app/ante/ante.go +++ b/app/ante/ante.go @@ -32,6 +32,8 @@ func NewAnteHandler( // Set up the context with a gas meter. // Must be called before gas consumption occurs in any other decorator. ante.NewSetUpContextDecorator(), + // Ensure the tx is not larger than the configured threshold. + NewMaxTxSizeDecorator(), // Ensure the tx does not contain any extension options. ante.NewExtensionOptionsDecorator(nil), // Ensure the tx passes ValidateBasic. @@ -42,7 +44,7 @@ func NewAnteHandler( ante.NewValidateMemoDecorator(accountKeeper), // Ensure the tx's gas limit is > the gas consumed based on the tx size. // Side effect: consumes gas from the gas meter. - ante.NewConsumeGasForTxSizeDecorator(accountKeeper), + NewConsumeGasForTxSizeDecorator(accountKeeper), // Ensure the feepayer (fee granter or first signer) has enough funds to pay for the tx. // Ensure the gas price >= network min gas price if app version >= 2. // Side effect: deducts fees from the fee payer. Sets the tx priority in context. 
diff --git a/app/ante/max_tx_size.go b/app/ante/max_tx_size.go new file mode 100644 index 0000000000..a9525777e2 --- /dev/null +++ b/app/ante/max_tx_size.go @@ -0,0 +1,33 @@ +package ante + +import ( + "fmt" + + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// MaxTxSizeDecorator ensures that a tx can not be larger than +// application's configured versioned constant. +type MaxTxSizeDecorator struct{} + +func NewMaxTxSizeDecorator() MaxTxSizeDecorator { + return MaxTxSizeDecorator{} +} + +// AnteHandle implements the AnteHandler interface. It ensures that tx size is under application's configured threshold. +func (d MaxTxSizeDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) { + // Tx size rule applies to app versions v3 and onwards. + if ctx.BlockHeader().Version.App < v3.Version { + return next(ctx, tx, simulate) + } + + currentTxSize := len(ctx.TxBytes()) + maxTxSize := appconsts.MaxTxSize(ctx.BlockHeader().Version.App) + if currentTxSize > maxTxSize { + bytesOverLimit := currentTxSize - maxTxSize + return ctx, fmt.Errorf("tx size %d bytes is larger than the application's configured threshold of %d bytes. Please reduce the size by %d bytes", currentTxSize, maxTxSize, bytesOverLimit) + } + return next(ctx, tx, simulate) +} diff --git a/app/ante/max_tx_size_test.go b/app/ante/max_tx_size_test.go new file mode 100644 index 0000000000..b52ceeb084 --- /dev/null +++ b/app/ante/max_tx_size_test.go @@ -0,0 +1,78 @@ +package ante_test + +import ( + "testing" + + "github.com/celestiaorg/celestia-app/v3/app/ante" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + version "github.com/tendermint/tendermint/proto/tendermint/version" +) + +func TestMaxTxSizeDecorator(t *testing.T) { + decorator := ante.NewMaxTxSizeDecorator() + anteHandler := sdk.ChainAnteDecorators(decorator) + + testCases := []struct { + name string + txSize int + expectError bool + appVersion uint64 + isCheckTx []bool + }{ + { + name: "good tx; under max tx size threshold", + txSize: v3.MaxTxSize - 1, + appVersion: v3.Version, + expectError: false, + isCheckTx: []bool{true, false}, + }, + { + name: "bad tx; over max tx size threshold", + txSize: v3.MaxTxSize + 1, + appVersion: v3.Version, + expectError: true, + isCheckTx: []bool{true, false}, + }, + { + name: "good tx; equal to max tx size threshold", + txSize: v3.MaxTxSize, + appVersion: v3.Version, + expectError: false, + isCheckTx: []bool{true, false}, + }, + { + name: "good tx; limit only applies to v3 and above", + txSize: v3.MaxTxSize + 10, + appVersion: v2.Version, + expectError: false, + isCheckTx: []bool{true, false}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, isCheckTx := range tc.isCheckTx { + + ctx := sdk.NewContext(nil, tmproto.Header{ + Version: version.Consensus{ + App: tc.appVersion, + }, + }, isCheckTx, nil) + + txBytes := make([]byte, tc.txSize) + + ctx = ctx.WithTxBytes(txBytes) + _, err := anteHandler(ctx, nil, false) + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + } + }) + } +} diff --git a/app/ante/min_fee_test.go b/app/ante/min_fee_test.go index 30912813c4..fc66e11a67 100644 
--- a/app/ante/min_fee_test.go +++ b/app/ante/min_fee_test.go @@ -9,7 +9,6 @@ import ( "github.com/celestiaorg/celestia-app/v3/app/ante" "github.com/celestiaorg/celestia-app/v3/app/encoding" "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" - v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" "github.com/celestiaorg/celestia-app/v3/test/util/testnode" "github.com/celestiaorg/celestia-app/v3/x/minfee" "github.com/cosmos/cosmos-sdk/codec" @@ -58,7 +57,7 @@ func TestValidateTxFee(t *testing.T) { { name: "bad tx; fee below required minimum", fee: sdk.NewCoins(sdk.NewInt64Coin(appconsts.BondDenom, feeAmount-1)), - gasLimit: uint64(float64(feeAmount) / v2.NetworkMinGasPrice), + gasLimit: uint64(float64(feeAmount) / appconsts.DefaultNetworkMinGasPrice), appVersion: uint64(2), isCheckTx: false, expErr: true, @@ -66,7 +65,7 @@ func TestValidateTxFee(t *testing.T) { { name: "good tx; fee equal to required minimum", fee: sdk.NewCoins(sdk.NewInt64Coin(appconsts.BondDenom, feeAmount)), - gasLimit: uint64(float64(feeAmount) / v2.NetworkMinGasPrice), + gasLimit: uint64(float64(feeAmount) / appconsts.DefaultNetworkMinGasPrice), appVersion: uint64(2), isCheckTx: false, expErr: false, @@ -74,7 +73,7 @@ func TestValidateTxFee(t *testing.T) { { name: "good tx; fee above required minimum", fee: sdk.NewCoins(sdk.NewInt64Coin(appconsts.BondDenom, feeAmount+1)), - gasLimit: uint64(float64(feeAmount) / v2.NetworkMinGasPrice), + gasLimit: uint64(float64(feeAmount) / appconsts.DefaultNetworkMinGasPrice), appVersion: uint64(2), isCheckTx: false, expErr: false, @@ -82,7 +81,7 @@ func TestValidateTxFee(t *testing.T) { { name: "good tx; with no fee (v1)", fee: sdk.NewCoins(sdk.NewInt64Coin(appconsts.BondDenom, feeAmount)), - gasLimit: uint64(float64(feeAmount) / v2.NetworkMinGasPrice), + gasLimit: uint64(float64(feeAmount) / appconsts.DefaultNetworkMinGasPrice), appVersion: uint64(1), isCheckTx: false, expErr: false, @@ -143,7 +142,7 @@ func TestValidateTxFee(t *testing.T) { ctx = ctx.WithMinGasPrices(sdk.DecCoins{validatorMinGasPriceCoin}) - networkMinGasPriceDec, err := sdk.NewDecFromStr(fmt.Sprintf("%f", v2.NetworkMinGasPrice)) + networkMinGasPriceDec, err := sdk.NewDecFromStr(fmt.Sprintf("%f", appconsts.DefaultNetworkMinGasPrice)) require.NoError(t, err) subspace, _ := paramsKeeper.GetSubspace(minfee.ModuleName) diff --git a/app/ante/tx_size_gas.go b/app/ante/tx_size_gas.go new file mode 100644 index 0000000000..356f6f2646 --- /dev/null +++ b/app/ante/tx_size_gas.go @@ -0,0 +1,151 @@ +package ante + +import ( + "encoding/hex" + + "cosmossdk.io/errors" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + "github.com/cosmos/cosmos-sdk/codec/legacy" + "github.com/cosmos/cosmos-sdk/crypto/keys/multisig" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + "github.com/cosmos/cosmos-sdk/x/auth/ante" + "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx" + authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" + auth "github.com/cosmos/cosmos-sdk/x/auth/types" +) + +var ( + // Simulation signature values used to estimate gas consumption. 
+ key = make([]byte, secp256k1.PubKeySize) + simSecp256k1Pubkey = &secp256k1.PubKey{Key: key} + simSecp256k1Sig [64]byte +) + +func init() { + // Decodes a valid hex string into a sepc256k1Pubkey for use in transaction simulation + bz, _ := hex.DecodeString("035AD6810A47F073553FF30D2FCC7E0D3B1C0B74B61A1AAA2582344037151E143A") + copy(key, bz) + simSecp256k1Pubkey.Key = key +} + +// ConsumeTxSizeGasDecorator will take in parameters and consume gas proportional +// to the size of tx before calling next AnteHandler. Note, the gas costs will be +// slightly over estimated due to the fact that any given signing account may need +// to be retrieved from state. +// +// CONTRACT: If simulate=true, then signatures must either be completely filled +// in or empty. +// CONTRACT: To use this decorator, signatures of transaction must be represented +// as legacytx.StdSignature otherwise simulate mode will incorrectly estimate gas cost. + +// The code was copied from celestia's fork of the cosmos-sdk: +// https://github.com/celestiaorg/cosmos-sdk/blob/release/v0.46.x-celestia/x/auth/ante/basic.go +// In app versions v2 and below, the txSizeCostPerByte used for gas cost estimation is taken from the auth module. +// In app v3 and above, the versioned constant appconsts.TxSizeCostPerByte is used. +type ConsumeTxSizeGasDecorator struct { + ak ante.AccountKeeper +} + +func NewConsumeGasForTxSizeDecorator(ak ante.AccountKeeper) ConsumeTxSizeGasDecorator { + return ConsumeTxSizeGasDecorator{ + ak: ak, + } +} + +func (cgts ConsumeTxSizeGasDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) { + sigTx, ok := tx.(authsigning.SigVerifiableTx) + if !ok { + return ctx, errors.Wrap(sdkerrors.ErrTxDecode, "invalid tx type") + } + params := cgts.ak.GetParams(ctx) + + consumeGasForTxSize(ctx, sdk.Gas(len(ctx.TxBytes())), params) + + // simulate gas cost for signatures in simulate mode + if simulate { + // in simulate mode, each element should be a nil signature + sigs, err := sigTx.GetSignaturesV2() + if err != nil { + return ctx, err + } + n := len(sigs) + + for i, signer := range sigTx.GetSigners() { + // if signature is already filled in, no need to simulate gas cost + if i < n && !isIncompleteSignature(sigs[i].Data) { + continue + } + + var pubkey cryptotypes.PubKey + + acc := cgts.ak.GetAccount(ctx, signer) + + // use placeholder simSecp256k1Pubkey if sig is nil + if acc == nil || acc.GetPubKey() == nil { + pubkey = simSecp256k1Pubkey + } else { + pubkey = acc.GetPubKey() + } + + // use stdsignature to mock the size of a full signature + simSig := legacytx.StdSignature{ //nolint:staticcheck // this will be removed when proto is ready + Signature: simSecp256k1Sig[:], + PubKey: pubkey, + } + + sigBz := legacy.Cdc.MustMarshal(simSig) + txBytes := sdk.Gas(len(sigBz) + 6) + + // If the pubkey is a multi-signature pubkey, then we estimate for the maximum + // number of signers. 
+ if _, ok := pubkey.(*multisig.LegacyAminoPubKey); ok { + txBytes *= params.TxSigLimit + } + + consumeGasForTxSize(ctx, txBytes, params) + } + } + + return next(ctx, tx, simulate) +} + +// isIncompleteSignature tests whether SignatureData is fully filled in for simulation purposes +func isIncompleteSignature(data signing.SignatureData) bool { + if data == nil { + return true + } + + switch data := data.(type) { + case *signing.SingleSignatureData: + return len(data.Signature) == 0 + case *signing.MultiSignatureData: + if len(data.Signatures) == 0 { + return true + } + for _, s := range data.Signatures { + if isIncompleteSignature(s) { + return true + } + } + } + + return false +} + +// consumeGasForTxSize consumes gas based on the size of the transaction. +// It uses different parameters depending on the app version. +func consumeGasForTxSize(ctx sdk.Context, txBytes uint64, params auth.Params) { + // For app v2 and below we should get txSizeCostPerByte from auth module + if ctx.BlockHeader().Version.App <= v2.Version { + ctx.GasMeter().ConsumeGas(params.TxSizeCostPerByte*txBytes, "txSize") + } else { + // From v3 onwards, we should get txSizeCostPerByte from appconsts + txSizeCostPerByte := appconsts.TxSizeCostPerByte(ctx.BlockHeader().Version.App) + ctx.GasMeter().ConsumeGas(txSizeCostPerByte*txBytes, "txSize") + } +} diff --git a/app/ante/tx_size_gas_test.go b/app/ante/tx_size_gas_test.go new file mode 100644 index 0000000000..6b6bdbd80b --- /dev/null +++ b/app/ante/tx_size_gas_test.go @@ -0,0 +1,197 @@ +package ante_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/ante" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + testutil "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/crypto/types/multisig" + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + xauthsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/stretchr/testify/require" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proto/tendermint/version" +) + +const TxSizeCostPerByte = 8 + +func setup() (*app.App, sdk.Context, client.Context, error) { + app, _, _ := testutil.NewTestAppWithGenesisSet(app.DefaultConsensusParams()) + ctx := app.NewContext(false, tmproto.Header{}) + params := authtypes.DefaultParams() + // Override default with a different TxSizeCostPerByte value for testing + params.TxSizeCostPerByte = TxSizeCostPerByte + app.AccountKeeper.SetParams(ctx, params) + ctx = ctx.WithBlockHeight(1) + + // Set up TxConfig. + encodingConfig := simapp.MakeTestEncodingConfig() + // We're using TestMsg encoding in the test, so register it here. + encodingConfig.Amino.RegisterConcrete(&testdata.TestMsg{}, "testdata.TestMsg", nil) + testdata.RegisterInterfaces(encodingConfig.InterfaceRegistry) + + clientCtx := client.Context{}. 
+ WithTxConfig(encodingConfig.TxConfig) + + return app, ctx, clientCtx, nil +} + +func TestConsumeGasForTxSize(t *testing.T) { + app, ctx, clientCtx, err := setup() + require.NoError(t, err) + var txBuilder client.TxBuilder + + // keys and addresses + priv1, _, addr1 := testdata.KeyTestPubAddr() + + // msg and signatures + msg := testdata.NewTestMsg(addr1) + feeAmount := testdata.NewTestFeeAmount() + gasLimit := testdata.NewTestGasLimit() + + cgtsd := ante.NewConsumeGasForTxSizeDecorator(app.AccountKeeper) + antehandler := sdk.ChainAnteDecorators(cgtsd) + + testCases := []struct { + version uint64 + name string + sigV2 signing.SignatureV2 + }{ + {v2.Version, "SingleSignatureData v2", signing.SignatureV2{PubKey: priv1.PubKey()}}, + {v2.Version, "MultiSignatureData v2", signing.SignatureV2{PubKey: priv1.PubKey(), Data: multisig.NewMultisig(2)}}, + {appconsts.LatestVersion, fmt.Sprintf("SingleSignatureData v%d", appconsts.LatestVersion), signing.SignatureV2{PubKey: priv1.PubKey()}}, + {appconsts.LatestVersion, fmt.Sprintf("MultiSignatureData v%d", appconsts.LatestVersion), signing.SignatureV2{PubKey: priv1.PubKey(), Data: multisig.NewMultisig(2)}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // set the version + ctx = app.NewContext(false, tmproto.Header{Version: version.Consensus{ + App: tc.version, + }}) + + txBuilder = clientCtx.TxConfig.NewTxBuilder() + require.NoError(t, txBuilder.SetMsgs(msg)) + txBuilder.SetFeeAmount(feeAmount) + txBuilder.SetGasLimit(gasLimit) + txBuilder.SetMemo(strings.Repeat("01234567890", 10)) + + privs, accNums, accSeqs := []cryptotypes.PrivKey{priv1}, []uint64{0}, []uint64{0} + tx, err := createTestTx(txBuilder, clientCtx, privs, accNums, accSeqs, ctx.ChainID()) + require.NoError(t, err) + + txBytes, err := clientCtx.TxConfig.TxJSONEncoder()(tx) + require.Nil(t, err, "Cannot marshal tx: %v", err) + + // expected TxSizeCostPerByte is different for each version + var txSizeCostPerByte uint64 + if tc.version == v2.Version { + txSizeCostPerByte = TxSizeCostPerByte + } else { + txSizeCostPerByte = appconsts.TxSizeCostPerByte(tc.version) + } + + expectedGas := sdk.Gas(len(txBytes)) * txSizeCostPerByte + + // set suite.ctx with TxBytes manually + ctx = ctx.WithTxBytes(txBytes) + + // track how much gas is necessary to retrieve parameters + beforeGas := ctx.GasMeter().GasConsumed() + app.AccountKeeper.GetParams(ctx) + afterGas := ctx.GasMeter().GasConsumed() + expectedGas += afterGas - beforeGas + + beforeGas = ctx.GasMeter().GasConsumed() + ctx, err = antehandler(ctx, tx, false) + require.Nil(t, err, "ConsumeTxSizeGasDecorator returned error: %v", err) + + // require that decorator consumes expected amount of gas + consumedGas := ctx.GasMeter().GasConsumed() - beforeGas + require.Equal(t, expectedGas, consumedGas, "Decorator did not consume the correct amount of gas") + + // simulation must not underestimate gas of this decorator even with nil signatures + txBuilder, err := clientCtx.TxConfig.WrapTxBuilder(tx) + require.NoError(t, err) + require.NoError(t, txBuilder.SetSignatures(tc.sigV2)) + tx = txBuilder.GetTx() + + simTxBytes, err := clientCtx.TxConfig.TxJSONEncoder()(tx) + require.Nil(t, err, "Cannot marshal tx: %v", err) + // require that simulated tx is smaller than tx with signatures + require.True(t, len(simTxBytes) < len(txBytes), "simulated tx still has signatures") + + // Set suite.ctx with smaller simulated TxBytes manually + ctx = ctx.WithTxBytes(simTxBytes) + + beforeSimGas := ctx.GasMeter().GasConsumed() + + // run 
antehandler with simulate=true + ctx, err = antehandler(ctx, tx, true) + consumedSimGas := ctx.GasMeter().GasConsumed() - beforeSimGas + + // require that antehandler passes and does not underestimate decorator cost + require.Nil(t, err, "ConsumeTxSizeGasDecorator returned error: %v", err) + require.True(t, consumedSimGas >= expectedGas, "Simulate mode underestimates gas on AnteDecorator. Simulated cost: %d, expected cost: %d", consumedSimGas, expectedGas) + }) + } +} + +// createTestTx creates a test tx given multiple inputs. +func createTestTx(txBuilder client.TxBuilder, clientCtx client.Context, privs []cryptotypes.PrivKey, accNums []uint64, accSeqs []uint64, chainID string) (xauthsigning.Tx, error) { + // First round: we gather all the signer infos. We use the "set empty + // signature" hack to do that. + sigsV2 := make([]signing.SignatureV2, 0, len(privs)) + for i, priv := range privs { + sigV2 := signing.SignatureV2{ + PubKey: priv.PubKey(), + Data: &signing.SingleSignatureData{ + SignMode: clientCtx.TxConfig.SignModeHandler().DefaultMode(), + Signature: nil, + }, + Sequence: accSeqs[i], + } + + sigsV2 = append(sigsV2, sigV2) + } + err := txBuilder.SetSignatures(sigsV2...) + if err != nil { + return nil, err + } + + // Second round: all signer infos are set, so each signer can sign. + sigsV2 = []signing.SignatureV2{} + for i, priv := range privs { + signerData := xauthsigning.SignerData{ + ChainID: chainID, + AccountNumber: accNums[i], + Sequence: accSeqs[i], + } + sigV2, err := tx.SignWithPrivKey( + clientCtx.TxConfig.SignModeHandler().DefaultMode(), signerData, + txBuilder, priv, clientCtx.TxConfig, accSeqs[i]) + if err != nil { + return nil, err + } + + sigsV2 = append(sigsV2, sigV2) + } + err = txBuilder.SetSignatures(sigsV2...) + if err != nil { + return nil, err + } + + return txBuilder.GetTx(), nil +} diff --git a/app/app.go b/app/app.go index 71c519305e..b13e8f7f3f 100644 --- a/app/app.go +++ b/app/app.go @@ -10,8 +10,10 @@ import ( celestiatx "github.com/celestiaorg/celestia-app/v3/app/grpc/tx" "github.com/celestiaorg/celestia-app/v3/app/module" "github.com/celestiaorg/celestia-app/v3/app/posthandler" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" appv1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" appv2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + appv3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" "github.com/celestiaorg/celestia-app/v3/pkg/proof" blobkeeper "github.com/celestiaorg/celestia-app/v3/x/blob/keeper" blobtypes "github.com/celestiaorg/celestia-app/v3/x/blob/types" @@ -108,6 +110,7 @@ var maccPerms = map[string][]string{ const ( v1 = appv1.Version v2 = appv2.Version + v3 = appv3.Version DefaultInitialVersion = v1 ) @@ -340,11 +343,11 @@ func New( packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp, // forward timeout packetforwardkeeper.DefaultRefundTransferPacketTimeoutTimestamp, // refund timeout ) - // PacketForwardMiddleware is used only for version 2. - transferStack = module.NewVersionedIBCModule(packetForwardMiddleware, transferStack, v2, v2) + // PacketForwardMiddleware is used only for version >= 2. + transferStack = module.NewVersionedIBCModule(packetForwardMiddleware, transferStack, v2, v3) // Token filter wraps packet forward middleware and is thus the first module in the transfer stack. 
tokenFilterMiddelware := tokenfilter.NewIBCMiddleware(transferStack) - transferStack = module.NewVersionedIBCModule(tokenFilterMiddelware, transferStack, v1, v2) + transferStack = module.NewVersionedIBCModule(tokenFilterMiddelware, transferStack, v1, v3) app.EvidenceKeeper = *evidencekeeper.NewKeeper( appCodec, @@ -455,7 +458,7 @@ func (app *App) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.R func (app *App) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { res := app.manager.EndBlock(ctx, req) currentVersion := app.AppVersion() - // For v1 only we upgrade using a agreed upon height known ahead of time + // For v1 only we upgrade using an agreed upon height known ahead of time if currentVersion == v1 { // check that we are at the height before the upgrade if req.Height == app.upgradeHeightV2-1 { @@ -477,6 +480,8 @@ func (app *App) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.Respo app.SignalKeeper.ResetTally(ctx) } } + res.Timeouts.TimeoutCommit = appconsts.GetTimeoutCommit(currentVersion) + res.Timeouts.TimeoutPropose = appconsts.GetTimeoutPropose(currentVersion) return res } @@ -532,26 +537,21 @@ func (app *App) Info(req abci.RequestInfo) abci.ResponseInfo { if resp.AppVersion > 0 && !app.IsSealed() { app.mountKeysAndInit(resp.AppVersion) } + + resp.Timeouts.TimeoutPropose = appconsts.GetTimeoutPropose(resp.AppVersion) + resp.Timeouts.TimeoutCommit = appconsts.GetTimeoutCommit(resp.AppVersion) + return resp } // InitChain implements the ABCI interface. This method is a wrapper around -// baseapp's InitChain so we can take the app version and setup the multicommit +// baseapp's InitChain so that we can take the app version and setup the multicommit // store. // // Side-effect: calls baseapp.Init() func (app *App) InitChain(req abci.RequestInitChain) (res abci.ResponseInitChain) { - // genesis must always contain the consensus params. The validator set however is derived from the - // initial genesis state. The genesis must always contain a non zero app version which is the initial - // version that the chain starts on - if req.ConsensusParams == nil || req.ConsensusParams.Version == nil { - panic("no consensus params set") - } - if req.ConsensusParams.Version.AppVersion == 0 { - panic("app version 0 is not accepted. Please set an app version in the genesis") - } + req = setDefaultAppVersion(req) appVersion := req.ConsensusParams.Version.AppVersion - // mount the stores for the provided app version if it has not already been mounted if app.AppVersion() == 0 && !app.IsSealed() { app.mountKeysAndInit(appVersion) @@ -564,13 +564,31 @@ func (app *App) InitChain(req abci.RequestInitChain) (res abci.ResponseInitChain app.SetInitialAppVersionInConsensusParams(ctx, appVersion) app.SetAppVersion(ctx, appVersion) } + res.Timeouts.TimeoutCommit = appconsts.GetTimeoutCommit(appVersion) + res.Timeouts.TimeoutPropose = appconsts.GetTimeoutPropose(appVersion) return res } +// setDefaultAppVersion sets the default app version in the consensus params if +// it was 0. This is needed because chains (e.x. mocha-4) did not explicitly set +// an app version in genesis.json. 
+func setDefaultAppVersion(req abci.RequestInitChain) abci.RequestInitChain { + if req.ConsensusParams == nil { + panic("no consensus params set") + } + if req.ConsensusParams.Version == nil { + panic("no version set in consensus params") + } + if req.ConsensusParams.Version.AppVersion == 0 { + req.ConsensusParams.Version.AppVersion = v1 + } + return req +} + // mountKeysAndInit mounts the keys for the provided app version and then // invokes baseapp.Init(). func (app *App) mountKeysAndInit(appVersion uint64) { - app.BaseApp.Logger().Debug(fmt.Sprintf("mounting KV stores for app version %v", appVersion)) + app.Logger().Info(fmt.Sprintf("mounting KV stores for app version %v", appVersion)) app.MountKVStores(app.versionedKeys(appVersion)) // Invoke load latest version for its side-effect of invoking baseapp.Init() @@ -585,9 +603,9 @@ func (app *App) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.Res if err := tmjson.Unmarshal(req.AppStateBytes, &genesisState); err != nil { panic(err) } - - app.UpgradeKeeper.SetModuleVersionMap(ctx, app.manager.GetVersionMap(req.ConsensusParams.Version.AppVersion)) - return app.manager.InitGenesis(ctx, app.appCodec, genesisState, req.ConsensusParams.Version.AppVersion) + appVersion := req.ConsensusParams.Version.AppVersion + app.UpgradeKeeper.SetModuleVersionMap(ctx, app.manager.GetVersionMap(appVersion)) + return app.manager.InitGenesis(ctx, app.appCodec, genesisState, appVersion) } // LoadHeight loads a particular height @@ -795,19 +813,38 @@ func (app *App) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferS return app.BaseApp.OfferSnapshot(req) } + app.Logger().Info("offering snapshot", "height", req.Snapshot.Height, "app_version", req.AppVersion) + if req.AppVersion != 0 { + if !isSupportedAppVersion(req.AppVersion) { + app.Logger().Info("rejecting snapshot because unsupported app version", "app_version", req.AppVersion) + return abci.ResponseOfferSnapshot{ + Result: abci.ResponseOfferSnapshot_REJECT, + } + } + + app.Logger().Info("mounting keys for snapshot", "app_version", req.AppVersion) + app.mountKeysAndInit(req.AppVersion) + return app.BaseApp.OfferSnapshot(req) + } + + // If the app version is not set in the snapshot, this falls back to inferring the app version based on the upgrade height. 
if app.upgradeHeightV2 == 0 { - app.Logger().Debug("v2 upgrade height not set, assuming app version 2") + app.Logger().Info("v2 upgrade height not set, assuming app version 2") app.mountKeysAndInit(v2) return app.BaseApp.OfferSnapshot(req) } if req.Snapshot.Height >= uint64(app.upgradeHeightV2) { - app.Logger().Debug("snapshot height is greater than or equal to upgrade height, assuming app version 2") + app.Logger().Info("snapshot height is greater than or equal to upgrade height, assuming app version 2") app.mountKeysAndInit(v2) return app.BaseApp.OfferSnapshot(req) } - app.Logger().Debug("snapshot height is less than upgrade height, assuming app version 1") + app.Logger().Info("snapshot height is less than upgrade height, assuming app version 1") app.mountKeysAndInit(v1) return app.BaseApp.OfferSnapshot(req) } + +func isSupportedAppVersion(appVersion uint64) bool { + return appVersion == v1 || appVersion == v2 || appVersion == v3 +} diff --git a/app/app_test.go b/app/app_test.go index 5e99ec522c..ae1c6089f5 100644 --- a/app/app_test.go +++ b/app/app_test.go @@ -1,10 +1,15 @@ package app_test import ( + "encoding/json" + "os" + "path/filepath" "testing" "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" "github.com/celestiaorg/celestia-app/v3/x/minfee" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/snapshots" @@ -13,6 +18,7 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmdb "github.com/tendermint/tm-db" ) @@ -52,7 +58,7 @@ func TestNew(t *testing.T) { }) } -func TestOfferSnapshot(t *testing.T) { +func TestInitChain(t *testing.T) { logger := log.NewNopLogger() db := tmdb.NewMemDB() traceStore := &NoopWriter{} @@ -60,35 +66,135 @@ func TestOfferSnapshot(t *testing.T) { encodingConfig := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
upgradeHeight := int64(0) appOptions := NoopAppOptions{} - snapshotOption := getSnapshotOption(t) - app := app.New(logger, db, traceStore, invCheckPeriod, encodingConfig, upgradeHeight, appOptions, snapshotOption) + testApp := app.New(logger, db, traceStore, invCheckPeriod, encodingConfig, upgradeHeight, appOptions) + genesisState, _, _ := util.GenesisStateWithSingleValidator(testApp, "account") + appStateBytes, err := json.MarshalIndent(genesisState, "", " ") + require.NoError(t, err) + genesis := testnode.DefaultConfig().Genesis - t.Run("should return ACCEPT", func(t *testing.T) { - request := abci.RequestOfferSnapshot{ - Snapshot: &abci.Snapshot{ - Height: 0x1b07ec, - Format: 0x2, - Chunks: 0x1, - Hash: []uint8{0xaf, 0xa5, 0xe, 0x16, 0x45, 0x4, 0x2e, 0x45, 0xd3, 0x49, 0xdf, 0x83, 0x2a, 0x57, 0x9d, 0x64, 0xc8, 0xad, 0xa5, 0xb, 0x65, 0x1b, 0x46, 0xd6, 0xc3, 0x85, 0x6, 0x51, 0xd7, 0x45, 0x8e, 0xb8}, - Metadata: []uint8{0xa, 0x20, 0xaf, 0xa5, 0xe, 0x16, 0x45, 0x4, 0x2e, 0x45, 0xd3, 0x49, 0xdf, 0x83, 0x2a, 0x57, 0x9d, 0x64, 0xc8, 0xad, 0xa5, 0xb, 0x65, 0x1b, 0x46, 0xd6, 0xc3, 0x85, 0x6, 0x51, 0xd7, 0x45, 0x8e, 0xb8}, + type testCase struct { + name string + request abci.RequestInitChain + wantPanic bool + } + testCases := []testCase{ + { + name: "should panic if consensus params not set", + request: abci.RequestInitChain{}, + wantPanic: true, + }, + { + name: "should not panic on a genesis that does not contain an app version", + request: abci.RequestInitChain{ + Time: genesis.GenesisTime, + ChainId: genesis.ChainID, + ConsensusParams: &abci.ConsensusParams{ + Block: &abci.BlockParams{}, + Evidence: &genesis.ConsensusParams.Evidence, + Validator: &genesis.ConsensusParams.Validator, + Version: &tmproto.VersionParams{}, // explicitly set to empty to remove app version., + }, + AppStateBytes: appStateBytes, + InitialHeight: 0, }, - AppHash: []byte("apphash"), - } + wantPanic: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + application := app.New(logger, db, traceStore, invCheckPeriod, encodingConfig, upgradeHeight, appOptions) + if tc.wantPanic { + assert.Panics(t, func() { application.InitChain(tc.request) }) + } else { + assert.NotPanics(t, func() { application.InitChain(tc.request) }) + } + }) + } +} + +func TestOfferSnapshot(t *testing.T) { + t.Run("should ACCEPT a snapshot with app version 0", func(t *testing.T) { + // Snapshots taken before the app version field was introduced to RequestOfferSnapshot should still be accepted. 
+ app := createTestApp(t) + request := createRequest() + want := abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT} + got := app.OfferSnapshot(request) + assert.Equal(t, want, got) + }) + t.Run("should ACCEPT a snapshot with app version 1", func(t *testing.T) { + app := createTestApp(t) + request := createRequest() + request.AppVersion = 1 + want := abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT} + got := app.OfferSnapshot(request) + assert.Equal(t, want, got) + }) + t.Run("should ACCEPT a snapshot with app version 2", func(t *testing.T) { + app := createTestApp(t) + request := createRequest() + request.AppVersion = 2 want := abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT} got := app.OfferSnapshot(request) assert.Equal(t, want, got) }) + t.Run("should ACCEPT a snapshot with app version 3", func(t *testing.T) { + app := createTestApp(t) + request := createRequest() + request.AppVersion = 3 + want := abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT} + got := app.OfferSnapshot(request) + assert.Equal(t, want, got) + }) + t.Run("should REJECT a snapshot with unsupported app version", func(t *testing.T) { + app := createTestApp(t) + request := createRequest() + request.AppVersion = 4 // unsupported app version + want := abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT} + got := app.OfferSnapshot(request) + assert.Equal(t, want, got) + }) } -func getSnapshotOption(t *testing.T) func(*baseapp.BaseApp) { - snapshotDir := t.TempDir() - snapshotDB, err := tmdb.NewDB("metadata", tmdb.GoLevelDBBackend, t.TempDir()) +func createTestApp(t *testing.T) *app.App { + db := tmdb.NewMemDB() + config := encoding.MakeConfig(app.ModuleEncodingRegisters...) + upgradeHeight := int64(3) + snapshotDir := filepath.Join(t.TempDir(), "data", "snapshots") + t.Cleanup(func() { + err := os.RemoveAll(snapshotDir) + require.NoError(t, err) + }) + snapshotDB, err := tmdb.NewDB("metadata", tmdb.GoLevelDBBackend, snapshotDir) + t.Cleanup(func() { + err := snapshotDB.Close() + require.NoError(t, err) + }) require.NoError(t, err) snapshotStore, err := snapshots.NewStore(snapshotDB, snapshotDir) require.NoError(t, err) - interval := uint64(10) - keepRecent := uint32(10) - return baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(interval, keepRecent)) + baseAppOption := baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(10, 10)) + testApp := app.New(log.NewNopLogger(), db, nil, 0, config, upgradeHeight, util.EmptyAppOptions{}, baseAppOption) + require.NoError(t, err) + response := testApp.Info(abci.RequestInfo{}) + require.Equal(t, uint64(0), response.AppVersion) + return testApp +} + +func createRequest() abci.RequestOfferSnapshot { + return abci.RequestOfferSnapshot{ + // Snapshot was created by logging the contents of OfferSnapshot on a + // node that was syncing via state sync. 
+ Snapshot: &abci.Snapshot{ + Height: 0x1b07ec, + Format: 0x2, + Chunks: 0x1, + Hash: []uint8{0xaf, 0xa5, 0xe, 0x16, 0x45, 0x4, 0x2e, 0x45, 0xd3, 0x49, 0xdf, 0x83, 0x2a, 0x57, 0x9d, 0x64, 0xc8, 0xad, 0xa5, 0xb, 0x65, 0x1b, 0x46, 0xd6, 0xc3, 0x85, 0x6, 0x51, 0xd7, 0x45, 0x8e, 0xb8}, + Metadata: []uint8{0xa, 0x20, 0xaf, 0xa5, 0xe, 0x16, 0x45, 0x4, 0x2e, 0x45, 0xd3, 0x49, 0xdf, 0x83, 0x2a, 0x57, 0x9d, 0x64, 0xc8, 0xad, 0xa5, 0xb, 0x65, 0x1b, 0x46, 0xd6, 0xc3, 0x85, 0x6, 0x51, 0xd7, 0x45, 0x8e, 0xb8}, + }, + AppHash: []byte("apphash"), + AppVersion: 0, // unit tests will override this + } } // NoopWriter is a no-op implementation of a writer. diff --git a/app/benchmarks/README.md b/app/benchmarks/README.md new file mode 100644 index 0000000000..f7f720741b --- /dev/null +++ b/app/benchmarks/README.md @@ -0,0 +1,27 @@ +# Benchmarks + +This package contains benchmarks for the ABCI methods with the following transaction types: + +- Message send +- IBC update client +- PayForBlobs + +## How to run + +To run the benchmarks, run the following in the root directory: + +```shell +go test -tags=bench_abci_methods -bench= app/benchmarks/benchmark_* +``` + +## Results + +The results are outlined in the [results](results.md) document. + +## Key takeaways + +We decided to softly limit the number of messages contained in a block, via introducing the `MaxPFBMessages` and `MaxNonPFBMessages`, and checking against them in prepare proposal. + +This way, the default block construction mechanism will only propose blocks that respect these limitations. And if a block that doesn't respect them reached consensus, it will still be accepted since this rule is not consensus breaking. + +As specified in [results](results.md) document, those results were generated on 16 core 48GB RAM machine, and gave us certain thresholds. However, when we run the same experiments on the recommended validator setup, 4 cores 16GB RAM, the numbers were lower. These low numbers are what we used in the limits. 
diff --git a/app/benchmarks/benchmark_ibc_update_client_test.go b/app/benchmarks/benchmark_ibc_update_client_test.go new file mode 100644 index 0000000000..873c560a95 --- /dev/null +++ b/app/benchmarks/benchmark_ibc_update_client_test.go @@ -0,0 +1,518 @@ +//go:build bench_abci_methods + +package benchmarks_test + +import ( + "fmt" + "math" + "testing" + "time" + + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v3/pkg/user" + testutil "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/celestiaorg/celestia-app/v3/test/util/testfactory" + dbm "github.com/cometbft/cometbft-db" + sdk "github.com/cosmos/cosmos-sdk/types" + types3 "github.com/cosmos/ibc-go/v6/modules/core/02-client/types" + types2 "github.com/cosmos/ibc-go/v6/modules/core/23-commitment/types" + types4 "github.com/cosmos/ibc-go/v6/modules/light-clients/07-tendermint/types" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" + crypto2 "github.com/tendermint/tendermint/proto/tendermint/crypto" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmprotoversion "github.com/tendermint/tendermint/proto/tendermint/version" + "github.com/tendermint/tendermint/version" + + "github.com/tendermint/tendermint/crypto/ed25519" + sm "github.com/tendermint/tendermint/state" + types0 "github.com/tendermint/tendermint/types" +) + +func BenchmarkIBC_CheckTx_Update_Client_Multi(b *testing.B) { + testCases := []struct { + numberOfValidators int + }{ + {numberOfValidators: 2}, + {numberOfValidators: 10}, + {numberOfValidators: 25}, + {numberOfValidators: 50}, + {numberOfValidators: 75}, + {numberOfValidators: 100}, + {numberOfValidators: 125}, + {numberOfValidators: 150}, + {numberOfValidators: 175}, + {numberOfValidators: 200}, + {numberOfValidators: 225}, + {numberOfValidators: 250}, + {numberOfValidators: 300}, + {numberOfValidators: 400}, + {numberOfValidators: 500}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("number of validators: %d", testCase.numberOfValidators), func(b *testing.B) { + benchmarkIBCCheckTxUpdateClient(b, testCase.numberOfValidators) + }) + } +} + +func benchmarkIBCCheckTxUpdateClient(b *testing.B, numberOfValidators int) { + testApp, rawTxs := generateIBCUpdateClientTransaction(b, numberOfValidators, 1, 1) + testApp.Commit() + + checkTxRequest := types.RequestCheckTx{ + Type: types.CheckTxType_New, + Tx: rawTxs[0], + } + + b.ResetTimer() + resp := testApp.CheckTx(checkTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", resp.Codespace) + b.ReportMetric(float64(resp.GasUsed), "gas_used") + b.ReportMetric(float64(len(rawTxs[0])), "transaction_size(byte)") + b.ReportMetric(float64(numberOfValidators), "number_of_validators") + b.ReportMetric(float64(2*numberOfValidators/3), "number_of_verified_signatures") +} + +func BenchmarkIBC_DeliverTx_Update_Client_Multi(b *testing.B) { + testCases := []struct { + numberOfValidators int + }{ + {numberOfValidators: 2}, + {numberOfValidators: 10}, + {numberOfValidators: 25}, + {numberOfValidators: 50}, + {numberOfValidators: 75}, + {numberOfValidators: 100}, + {numberOfValidators: 125}, + {numberOfValidators: 150}, + {numberOfValidators: 175}, + {numberOfValidators: 200}, + {numberOfValidators: 225}, + {numberOfValidators: 250}, + 
{numberOfValidators: 300}, + {numberOfValidators: 400}, + {numberOfValidators: 500}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("number of validators: %d", testCase.numberOfValidators), func(b *testing.B) { + benchmarkIBCDeliverTxUpdateClient(b, testCase.numberOfValidators) + }) + } +} + +func benchmarkIBCDeliverTxUpdateClient(b *testing.B, numberOfValidators int) { + testApp, rawTxs := generateIBCUpdateClientTransaction(b, numberOfValidators, 1, 1) + + deliverTxRequest := types.RequestDeliverTx{ + Tx: rawTxs[0], + } + + b.ResetTimer() + resp := testApp.DeliverTx(deliverTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", resp.Codespace) + b.ReportMetric(float64(resp.GasUsed), "gas_used") + b.ReportMetric(float64(len(rawTxs[0])), "transaction_size(byte)") + b.ReportMetric(float64(numberOfValidators), "number_of_validators") + b.ReportMetric(float64(2*numberOfValidators/3), "number_of_verified_signatures") +} + +func BenchmarkIBC_PrepareProposal_Update_Client_Multi(b *testing.B) { + testCases := []struct { + numberOfTransactions, numberOfValidators int + }{ + {numberOfTransactions: 6_000, numberOfValidators: 2}, + {numberOfTransactions: 3_000, numberOfValidators: 10}, + {numberOfTransactions: 2_000, numberOfValidators: 25}, + {numberOfTransactions: 1_000, numberOfValidators: 50}, + {numberOfTransactions: 500, numberOfValidators: 75}, + {numberOfTransactions: 500, numberOfValidators: 100}, + {numberOfTransactions: 500, numberOfValidators: 125}, + {numberOfTransactions: 500, numberOfValidators: 150}, + {numberOfTransactions: 500, numberOfValidators: 175}, + {numberOfTransactions: 500, numberOfValidators: 200}, + {numberOfTransactions: 500, numberOfValidators: 225}, + {numberOfTransactions: 500, numberOfValidators: 250}, + {numberOfTransactions: 500, numberOfValidators: 300}, + {numberOfTransactions: 500, numberOfValidators: 400}, + {numberOfTransactions: 500, numberOfValidators: 500}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("number of validators: %d", testCase.numberOfValidators), func(b *testing.B) { + benchmarkIBCPrepareProposalUpdateClient(b, testCase.numberOfValidators, testCase.numberOfTransactions) + }) + } +} + +func benchmarkIBCPrepareProposalUpdateClient(b *testing.B, numberOfValidators, count int) { + testApp, rawTxs := generateIBCUpdateClientTransaction(b, numberOfValidators, count, 0) + + blockData := &tmproto.Data{ + Txs: rawTxs, + } + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: blockData, + ChainId: testApp.GetChainID(), + Height: 10, + } + + b.ResetTimer() + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + b.StopTimer() + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + b.ReportMetric(float64(b.Elapsed().Nanoseconds()), "prepare_proposal_time(ns)") + b.ReportMetric(float64(len(prepareProposalResponse.BlockData.Txs)), "number_of_transactions") + b.ReportMetric(float64(len(rawTxs[0])), "transactions_size(byte)") + b.ReportMetric(calculateBlockSizeInMb(prepareProposalResponse.BlockData.Txs), "block_size(mb)") + b.ReportMetric(float64(calculateTotalGasUsed(testApp, prepareProposalResponse.BlockData.Txs)), "total_gas_used") + b.ReportMetric(float64(numberOfValidators), "number_of_validators") + b.ReportMetric(float64(2*numberOfValidators/3), "number_of_verified_signatures") +} + +func BenchmarkIBC_ProcessProposal_Update_Client_Multi(b *testing.B) { + testCases := []struct { + numberOfTransactions, numberOfValidators int + }{ 
+ {numberOfTransactions: 6_000, numberOfValidators: 2}, + {numberOfTransactions: 3_000, numberOfValidators: 10}, + {numberOfTransactions: 2_000, numberOfValidators: 25}, + {numberOfTransactions: 1_000, numberOfValidators: 50}, + {numberOfTransactions: 500, numberOfValidators: 75}, + {numberOfTransactions: 500, numberOfValidators: 100}, + {numberOfTransactions: 500, numberOfValidators: 125}, + {numberOfTransactions: 500, numberOfValidators: 150}, + {numberOfTransactions: 500, numberOfValidators: 175}, + {numberOfTransactions: 500, numberOfValidators: 200}, + {numberOfTransactions: 500, numberOfValidators: 225}, + {numberOfTransactions: 500, numberOfValidators: 250}, + {numberOfTransactions: 500, numberOfValidators: 300}, + {numberOfTransactions: 500, numberOfValidators: 400}, + {numberOfTransactions: 500, numberOfValidators: 500}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("number of validators: %d", testCase.numberOfValidators), func(b *testing.B) { + benchmarkIBCProcessProposalUpdateClient(b, testCase.numberOfValidators, testCase.numberOfTransactions) + }) + } +} + +func benchmarkIBCProcessProposalUpdateClient(b *testing.B, numberOfValidators, count int) { + testApp, rawTxs := generateIBCUpdateClientTransaction(b, numberOfValidators, count, 0) + + blockData := &tmproto.Data{ + Txs: rawTxs, + } + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: blockData, + ChainId: testApp.GetChainID(), + Height: 10, + } + + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + + processProposalRequest := types.RequestProcessProposal{ + BlockData: prepareProposalResponse.BlockData, + Header: tmproto.Header{ + Height: 10, + DataHash: prepareProposalResponse.BlockData.Hash, + ChainID: testutil.ChainID, + Version: tmprotoversion.Consensus{ + App: testApp.AppVersion(), + }, + }, + } + + b.ResetTimer() + resp := testApp.ProcessProposal(processProposalRequest) + b.StopTimer() + require.Equal(b, types.ResponseProcessProposal_ACCEPT, resp.Result) + + b.ReportMetric(float64(b.Elapsed().Nanoseconds()), "process_proposal_time(ns)") + b.ReportMetric(float64(len(prepareProposalResponse.BlockData.Txs)), "number_of_transactions") + b.ReportMetric(float64(len(rawTxs[0])), "transactions_size(byte)") + b.ReportMetric(calculateBlockSizeInMb(prepareProposalResponse.BlockData.Txs), "block_size(mb)") + b.ReportMetric(float64(calculateTotalGasUsed(testApp, prepareProposalResponse.BlockData.Txs)), "total_gas_used") + b.ReportMetric(float64(numberOfValidators), "number_of_validators") + b.ReportMetric(float64(2*numberOfValidators/3), "number_of_verified_signatures") +} + +// generateIBCUpdateClientTransaction creates a test app then generates an IBC +// update client transaction with the specified number of validators. +// Note: the number of the verified signatures is: 2 * numberOfValidators / 3 +// the offset is just a hack for transactions to be processed by the needed +// ABCI method. +func generateIBCUpdateClientTransaction(b *testing.B, numberOfValidators int, numberOfMessages int, offsetAccountSequence int) (*app.App, [][]byte) { + account := "test" + testApp, kr := testutil.SetupTestAppWithGenesisValSetAndMaxSquareSize(app.DefaultConsensusParams(), 128, account) + addr := testfactory.GetAddress(kr, account) + enc := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ acc := testutil.DirectQueryAccount(testApp, addr) + signer, err := user.NewSigner(kr, enc.TxConfig, testutil.ChainID, appconsts.LatestVersion, user.NewAccount(account, acc.GetAccountNumber(), acc.GetSequence())) + require.NoError(b, err) + + msgs := generateUpdateClientTransaction( + b, + testApp, + *signer, + acc.GetAddress().String(), + account, + numberOfValidators, + numberOfMessages, + ) + + accountSequence := testutil.DirectQueryAccount(testApp, addr).GetSequence() + err = signer.SetSequence(account, accountSequence+uint64(offsetAccountSequence)) + require.NoError(b, err) + rawTxs := make([][]byte, 0, numberOfMessages) + for i := 0; i < numberOfMessages; i++ { + rawTx, err := signer.CreateTx([]sdk.Msg{msgs[i]}, user.SetGasLimit(25497600000), user.SetFee(100000)) + require.NoError(b, err) + rawTxs = append(rawTxs, rawTx) + accountSequence++ + err = signer.SetSequence(account, accountSequence) + require.NoError(b, err) + } + + return testApp, rawTxs +} + +func generateUpdateClientTransaction(b *testing.B, app *app.App, signer user.Signer, signerAddr string, signerName string, numberOfValidators int, numberOfMsgs int) []*types3.MsgUpdateClient { + state, _, privVals := makeState(numberOfValidators, 5) + wBefore := time.Now() + time.Sleep(time.Second) + w := time.Now() + lastResultHash := crypto.CRandBytes(tmhash.Size) + lastCommitHash := crypto.CRandBytes(tmhash.Size) + lastBlockHash := crypto.CRandBytes(tmhash.Size) + lastBlockID := makeBlockID(lastBlockHash, 1000, []byte("hash")) + header := tmproto.Header{ + Version: tmprotoversion.Consensus{Block: version.BlockProtocol, App: 1}, + ChainID: state.ChainID, + Height: 5, + Time: w, + LastCommitHash: lastCommitHash, + DataHash: crypto.CRandBytes(tmhash.Size), + ValidatorsHash: state.Validators.Hash(), + NextValidatorsHash: state.Validators.Hash(), + ConsensusHash: crypto.CRandBytes(tmhash.Size), + AppHash: crypto.CRandBytes(tmhash.Size), + LastResultsHash: lastResultHash, + EvidenceHash: crypto.CRandBytes(tmhash.Size), + ProposerAddress: crypto.CRandBytes(crypto.AddressSize), + LastBlockId: lastBlockID.ToProto(), + } + t := types0.Header{ + Version: tmprotoversion.Consensus{Block: version.BlockProtocol, App: 1}, + ChainID: state.ChainID, + Height: 5, + Time: w, + LastCommitHash: header.LastCommitHash, + DataHash: header.DataHash, + ValidatorsHash: header.ValidatorsHash, + NextValidatorsHash: header.NextValidatorsHash, + ConsensusHash: header.ConsensusHash, + AppHash: header.AppHash, + LastResultsHash: header.LastResultsHash, + EvidenceHash: header.EvidenceHash, + ProposerAddress: header.ProposerAddress, + LastBlockID: lastBlockID, + } + header0Hash := t.Hash() + blockID := makeBlockID(header0Hash, 1000, []byte("partshash")) + commit, err := makeValidCommit(5, blockID, state.Validators, privVals) + require.NoError(b, err) + signatures := make([]tmproto.CommitSig, numberOfValidators) + validators := make([]*tmproto.Validator, numberOfValidators) + for i := 0; i < numberOfValidators; i++ { + signatures[i] = tmproto.CommitSig{ + BlockIdFlag: tmproto.BlockIDFlag(commit.Signatures[i].BlockIDFlag), + ValidatorAddress: commit.Signatures[i].ValidatorAddress, + Timestamp: commit.Signatures[i].Timestamp, + Signature: commit.Signatures[i].Signature, + } + validators[i] = &tmproto.Validator{ + Address: state.Validators.Validators[i].Address, + PubKey: crypto2.PublicKey{Sum: &crypto2.PublicKey_Ed25519{Ed25519: state.Validators.Validators[i].PubKey.Bytes()}}, + VotingPower: state.Validators.Validators[i].VotingPower, + ProposerPriority: 
state.Validators.Validators[i].ProposerPriority, + } + } + sh := tmproto.SignedHeader{ + Header: &header, + Commit: &tmproto.Commit{ + Height: commit.Height, + Round: commit.Round, + BlockID: tmproto.BlockID{ + Hash: header0Hash, + PartSetHeader: tmproto.PartSetHeader{ + Total: commit.BlockID.PartSetHeader.Total, + Hash: commit.BlockID.PartSetHeader.Hash, + }, + }, + Signatures: signatures, + }, + } + clientState := types4.ClientState{ + ChainId: chainID, + TrustLevel: types4.Fraction{Numerator: 1, Denominator: 3}, + TrustingPeriod: time.Hour * 24 * 21 * 100, // we want to always accept the upgrade + UnbondingPeriod: time.Hour * 24 * 21 * 101, + MaxClockDrift: math.MaxInt64 - 1, + FrozenHeight: types3.Height{}, + LatestHeight: types3.Height{ + RevisionNumber: 0, + RevisionHeight: 4, + }, + ProofSpecs: types2.GetSDKSpecs(), + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: true, + } + consensusState := types4.ConsensusState{ + Timestamp: wBefore, + Root: types2.MerkleRoot{Hash: lastBlockHash}, + NextValidatorsHash: state.Validators.Hash(), + } + + msgs := make([]*types3.MsgUpdateClient, numberOfMsgs) + for index := 0; index < numberOfMsgs; index++ { + createClientMsg, err := types3.NewMsgCreateClient(&clientState, &consensusState, signerAddr) + require.NoError(b, err) + rawTx, err := signer.CreateTx([]sdk.Msg{createClientMsg}, user.SetGasLimit(2549760000), user.SetFee(10000)) + require.NoError(b, err) + resp := app.DeliverTx(types.RequestDeliverTx{Tx: rawTx}) + var clientName string + for _, event := range resp.Events { + if event.Type == types3.EventTypeCreateClient { + for _, attribute := range event.Attributes { + if string(attribute.Key) == types3.AttributeKeyClientID { + clientName = string(attribute.Value) + } + } + } + } + require.NotEmpty(b, clientName) + + msg, err := types3.NewMsgUpdateClient( + clientName, + &types4.Header{ + SignedHeader: &sh, + ValidatorSet: &tmproto.ValidatorSet{ + Validators: validators, + Proposer: &tmproto.Validator{ + Address: state.Validators.Proposer.Address, + PubKey: crypto2.PublicKey{Sum: &crypto2.PublicKey_Ed25519{Ed25519: state.Validators.Proposer.PubKey.Bytes()}}, + VotingPower: state.Validators.Proposer.VotingPower, + ProposerPriority: state.Validators.Proposer.ProposerPriority, + }, + TotalVotingPower: state.Validators.TotalVotingPower(), + }, + TrustedHeight: types3.Height{ + RevisionNumber: 0, + RevisionHeight: 4, + }, + TrustedValidators: &tmproto.ValidatorSet{ + Validators: validators, + Proposer: &tmproto.Validator{ + Address: state.Validators.Proposer.Address, + PubKey: crypto2.PublicKey{Sum: &crypto2.PublicKey_Ed25519{Ed25519: state.Validators.Proposer.PubKey.Bytes()}}, + VotingPower: state.Validators.Proposer.VotingPower, + ProposerPriority: state.Validators.Proposer.ProposerPriority, + }, + TotalVotingPower: state.Validators.TotalVotingPower(), + }, + }, + signerAddr, + ) + require.NoError(b, err) + msgs[index] = msg + err = signer.IncrementSequence(signerName) + require.NoError(b, err) + } + + return msgs +} + +var chainID = "test" + +func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types0.PrivValidator) { + vals := make([]types0.GenesisValidator, nVals) + privVals := make(map[string]types0.PrivValidator, nVals) + for i := 0; i < nVals; i++ { + secret := []byte(fmt.Sprintf("test%d", i)) + pk := ed25519.GenPrivKeyFromSecret(secret) + valAddr := pk.PubKey().Address() + vals[i] = types0.GenesisValidator{ + Address: valAddr, + PubKey: pk.PubKey(), + Power: 1000, + Name: fmt.Sprintf("test%d", i), + } + 
privVals[valAddr.String()] = types0.NewMockPVWithParams(pk, false, false) + } + s, _ := sm.MakeGenesisState(&types0.GenesisDoc{ + ChainID: chainID, + Validators: vals, + AppHash: nil, + }) + + stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) + if err := stateStore.Save(s); err != nil { + panic(err) + } + + for i := 1; i < height; i++ { + s.LastBlockHeight++ + s.LastValidators = s.Validators.Copy() + if err := stateStore.Save(s); err != nil { + panic(err) + } + } + + return s, stateDB, privVals +} + +func makeValidCommit( + height int64, + blockID types0.BlockID, + vals *types0.ValidatorSet, + privVals map[string]types0.PrivValidator, +) (*types0.Commit, error) { + sigs := make([]types0.CommitSig, 0) + for i := 0; i < vals.Size(); i++ { + _, val := vals.GetByIndex(int32(i)) + vote, err := types0.MakeVote(height, blockID, vals, privVals[val.Address.String()], chainID, time.Now()) + if err != nil { + return nil, err + } + sigs = append(sigs, vote.CommitSig()) + } + return types0.NewCommit(height, 0, blockID, sigs), nil +} + +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types0.BlockID { + var ( + h = make([]byte, tmhash.Size) + psH = make([]byte, tmhash.Size) + ) + copy(h, hash) + copy(psH, partSetHash) + return types0.BlockID{ + Hash: h, + PartSetHeader: types0.PartSetHeader{ + Total: partSetSize, + Hash: psH, + }, + } +} diff --git a/app/benchmarks/benchmark_msg_send_test.go b/app/benchmarks/benchmark_msg_send_test.go new file mode 100644 index 0000000000..7e79db9a67 --- /dev/null +++ b/app/benchmarks/benchmark_msg_send_test.go @@ -0,0 +1,333 @@ +//go:build bench_abci_methods + +package benchmarks_test + +import ( + "fmt" + "testing" + "time" + + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v3/pkg/user" + testutil "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/celestiaorg/celestia-app/v3/test/util/testfactory" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" + sdk "github.com/cosmos/cosmos-sdk/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proto/tendermint/version" +) + +func BenchmarkCheckTx_MsgSend_1(b *testing.B) { + testApp, rawTxs := generateMsgSendTransactions(b, 1) + testApp.Commit() + + checkTxRequest := types.RequestCheckTx{ + Tx: rawTxs[0], + Type: types.CheckTxType_New, + } + + b.ResetTimer() + resp := testApp.CheckTx(checkTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", resp.Codespace) + b.ReportMetric(float64(resp.GasUsed), "gas_used") +} + +func BenchmarkCheckTx_MsgSend_8MB(b *testing.B) { + testApp, rawTxs := generateMsgSendTransactions(b, 31645) + testApp.Commit() + + var totalGas int64 + b.ResetTimer() + for _, tx := range rawTxs { + checkTxRequest := types.RequestCheckTx{ + Tx: tx, + Type: types.CheckTxType_New, + } + b.StartTimer() + resp := testApp.CheckTx(checkTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", resp.Codespace) + totalGas += resp.GasUsed + } + + b.StopTimer() + b.ReportMetric(float64(totalGas), "total_gas_used") +} + +func BenchmarkDeliverTx_MsgSend_1(b *testing.B) { + testApp, rawTxs := 
generateMsgSendTransactions(b, 1) + + deliverTxRequest := types.RequestDeliverTx{ + Tx: rawTxs[0], + } + + b.ResetTimer() + resp := testApp.DeliverTx(deliverTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", resp.Codespace) + b.ReportMetric(float64(resp.GasUsed), "gas_used") +} + +func BenchmarkDeliverTx_MsgSend_8MB(b *testing.B) { + testApp, rawTxs := generateMsgSendTransactions(b, 31645) + + var totalGas int64 + b.ResetTimer() + for _, tx := range rawTxs { + deliverTxRequest := types.RequestDeliverTx{ + Tx: tx, + } + b.StartTimer() + resp := testApp.DeliverTx(deliverTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", resp.Codespace) + totalGas += resp.GasUsed + } + b.StopTimer() + b.ReportMetric(float64(totalGas), "total_gas_used") +} + +func BenchmarkPrepareProposal_MsgSend_1(b *testing.B) { + testApp, rawTxs := generateMsgSendTransactions(b, 1) + + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: &tmproto.Data{ + Txs: rawTxs, + }, + ChainId: testApp.GetChainID(), + Height: 10, + } + + b.ResetTimer() + resp := testApp.PrepareProposal(prepareProposalRequest) + b.StopTimer() + require.GreaterOrEqual(b, len(resp.BlockData.Txs), 1) + b.ReportMetric(float64(calculateTotalGasUsed(testApp, resp.BlockData.Txs)), "total_gas_used") +} + +func BenchmarkPrepareProposal_MsgSend_8MB(b *testing.B) { + // a full 8mb block equals to around 31645 msg send transactions. + // using 31645 to let prepare proposal choose the maximum + testApp, rawTxs := generateMsgSendTransactions(b, 31645) + + blockData := &tmproto.Data{ + Txs: rawTxs, + } + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: blockData, + ChainId: testApp.GetChainID(), + Height: 10, + } + + b.ResetTimer() + resp := testApp.PrepareProposal(prepareProposalRequest) + b.StopTimer() + require.GreaterOrEqual(b, len(resp.BlockData.Txs), 1) + b.ReportMetric(float64(len(resp.BlockData.Txs)), "number_of_transactions") + b.ReportMetric(calculateBlockSizeInMb(resp.BlockData.Txs), "block_size(mb)") + b.ReportMetric(float64(calculateTotalGasUsed(testApp, resp.BlockData.Txs)), "total_gas_used") +} + +func BenchmarkProcessProposal_MsgSend_1(b *testing.B) { + testApp, rawTxs := generateMsgSendTransactions(b, 1) + + blockData := &tmproto.Data{ + Txs: rawTxs, + } + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: blockData, + ChainId: testApp.GetChainID(), + Height: 10, + } + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + + processProposalRequest := types.RequestProcessProposal{ + BlockData: prepareProposalResponse.BlockData, + Header: tmproto.Header{ + Height: 1, + DataHash: prepareProposalResponse.BlockData.Hash, + ChainID: testutil.ChainID, + Version: version.Consensus{ + App: testApp.AppVersion(), + }, + }, + } + + b.ResetTimer() + resp := testApp.ProcessProposal(processProposalRequest) + b.StopTimer() + require.Equal(b, types.ResponseProcessProposal_ACCEPT, resp.Result) + + b.ReportMetric(float64(calculateTotalGasUsed(testApp, prepareProposalResponse.BlockData.Txs)), "total_gas_used") +} + +func BenchmarkProcessProposal_MsgSend_8MB(b *testing.B) { + // a full 8mb block equals to around 31645 msg send transactions. 
+ // using 31645 to let prepare proposal choose the maximum + testApp, rawTxs := generateMsgSendTransactions(b, 31645) + + blockData := &tmproto.Data{ + Txs: rawTxs, + } + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: blockData, + ChainId: testApp.GetChainID(), + Height: 10, + } + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + + b.ReportMetric(float64(len(prepareProposalResponse.BlockData.Txs)), "number_of_transactions") + b.ReportMetric(calculateBlockSizeInMb(prepareProposalResponse.BlockData.Txs), "block_size_(mb)") + b.ReportMetric(float64(calculateTotalGasUsed(testApp, prepareProposalResponse.BlockData.Txs)), "total_gas_used") + + processProposalRequest := types.RequestProcessProposal{ + BlockData: prepareProposalResponse.BlockData, + Header: tmproto.Header{ + Height: 10, + DataHash: prepareProposalResponse.BlockData.Hash, + ChainID: testutil.ChainID, + Version: version.Consensus{ + App: testApp.AppVersion(), + }, + }, + } + + b.ResetTimer() + resp := testApp.ProcessProposal(processProposalRequest) + b.StopTimer() + require.Equal(b, types.ResponseProcessProposal_ACCEPT, resp.Result) + + b.ReportMetric(float64(calculateTotalGasUsed(testApp, prepareProposalResponse.BlockData.Txs)), "total_gas_used") +} + +func BenchmarkProcessProposal_MsgSend_8MB_Find_Half_Sec(b *testing.B) { + targetTimeLowerBound := 0.499 + targetTimeUpperBound := 0.511 + numberOfTransaction := 5500 + testApp, rawTxs := generateMsgSendTransactions(b, numberOfTransaction) + start := 0 + end := numberOfTransaction + segment := end - start + for { + if segment == 1 { + break + } + + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: &tmproto.Data{ + Txs: rawTxs[start:end], + }, + ChainId: testApp.GetChainID(), + Height: 10, + } + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + + processProposalRequest := types.RequestProcessProposal{ + BlockData: prepareProposalResponse.BlockData, + Header: tmproto.Header{ + Height: 10, + DataHash: prepareProposalResponse.BlockData.Hash, + ChainID: testutil.ChainID, + Version: version.Consensus{ + App: testApp.AppVersion(), + }, + }, + } + + startTime := time.Now() + resp := testApp.ProcessProposal(processProposalRequest) + endTime := time.Now() + require.Equal(b, types.ResponseProcessProposal_ACCEPT, resp.Result) + + timeElapsed := float64(endTime.Sub(startTime).Nanoseconds()) / 1e9 + + switch { + case timeElapsed < targetTimeLowerBound: + newEnd := end + segment/2 + if newEnd > len(rawTxs) { + newEnd = len(rawTxs) + } + end = newEnd + segment = end - start + if segment <= 1 { + break + } + continue + case timeElapsed > targetTimeUpperBound: + newEnd := end / 2 + if newEnd <= start { + break + } + end = newEnd + segment = end - start + continue + default: + b.ReportMetric(timeElapsed, fmt.Sprintf("elapsedTime(s)_%d", end-start)) + } + break + } +} + +// generateMsgSendTransactions creates a test app then generates a number +// of valid msg send transactions. +func generateMsgSendTransactions(b *testing.B, count int) (*app.App, [][]byte) { + account := "test" + testApp, kr := testutil.SetupTestAppWithGenesisValSetAndMaxSquareSize(app.DefaultConsensusParams(), 128, account) + addr := testfactory.GetAddress(kr, account) + enc := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ acc := testutil.DirectQueryAccount(testApp, addr) + signer, err := user.NewSigner(kr, enc.TxConfig, testutil.ChainID, appconsts.LatestVersion, user.NewAccount(account, acc.GetAccountNumber(), acc.GetSequence())) + require.NoError(b, err) + rawTxs := make([][]byte, 0, count) + for i := 0; i < count; i++ { + msg := banktypes.NewMsgSend( + addr, + testnode.RandomAddress().(sdk.AccAddress), + sdk.NewCoins(sdk.NewInt64Coin(appconsts.BondDenom, 10)), + ) + rawTx, err := signer.CreateTx([]sdk.Msg{msg}, user.SetGasLimit(1000000), user.SetFee(10)) + require.NoError(b, err) + rawTxs = append(rawTxs, rawTx) + err = signer.IncrementSequence(account) + require.NoError(b, err) + } + return testApp, rawTxs +} + +// mebibyte the number of bytes in a mebibyte +const mebibyte = 1048576 + +// calculateBlockSizeInMb returns the block size in mb given a set +// of raw transactions. +func calculateBlockSizeInMb(txs [][]byte) float64 { + numberOfBytes := 0 + for _, tx := range txs { + numberOfBytes += len(tx) + } + mb := float64(numberOfBytes) / mebibyte + return mb +} + +// calculateTotalGasUsed simulates the provided transactions and returns the +// total gas used by all of them +func calculateTotalGasUsed(testApp *app.App, txs [][]byte) uint64 { + var totalGas uint64 + for _, tx := range txs { + gasInfo, _, _ := testApp.Simulate(tx) + totalGas += gasInfo.GasUsed + } + return totalGas +} diff --git a/app/benchmarks/benchmark_pfb_test.go b/app/benchmarks/benchmark_pfb_test.go new file mode 100644 index 0000000000..274dbf8353 --- /dev/null +++ b/app/benchmarks/benchmark_pfb_test.go @@ -0,0 +1,381 @@ +//go:build bench_abci_methods + +package benchmarks_test + +import ( + "fmt" + "testing" + "time" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/log" + + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v3/pkg/user" + testutil "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/celestiaorg/celestia-app/v3/test/util/testfactory" + "github.com/celestiaorg/go-square/v2/share" + blobtx "github.com/celestiaorg/go-square/v2/tx" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proto/tendermint/version" +) + +func init() { + testutil.TestAppLogger = log.NewNopLogger() +} + +func BenchmarkCheckTx_PFB_Multi(b *testing.B) { + testCases := []struct { + blobSize int + }{ + {blobSize: 300}, + {blobSize: 500}, + {blobSize: 1000}, + {blobSize: 5000}, + {blobSize: 10_000}, + {blobSize: 50_000}, + {blobSize: 100_000}, + {blobSize: 200_000}, + {blobSize: 300_000}, + {blobSize: 400_000}, + {blobSize: 500_000}, + {blobSize: 1_000_000}, + {blobSize: 2_000_000}, + {blobSize: 3_000_000}, + {blobSize: 4_000_000}, + {blobSize: 5_000_000}, + {blobSize: 6_000_000}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("%d bytes", testCase.blobSize), func(b *testing.B) { + benchmarkCheckTxPFB(b, testCase.blobSize) + }) + } +} + +func benchmarkCheckTxPFB(b *testing.B, size int) { + testApp, rawTxs := generatePayForBlobTransactions(b, 1, size) + testApp.Commit() + + checkTxRequest := types.RequestCheckTx{ + Tx: rawTxs[0], + Type: types.CheckTxType_New, + } + + b.ResetTimer() + resp := testApp.CheckTx(checkTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", 
resp.Codespace) + b.ReportMetric(float64(resp.GasUsed), "gas_used") + b.ReportMetric(float64(len(rawTxs[0])), "transaction_size(byte)") +} + +func BenchmarkDeliverTx_PFB_Multi(b *testing.B) { + testCases := []struct { + blobSize int + }{ + {blobSize: 300}, + {blobSize: 500}, + {blobSize: 1000}, + {blobSize: 5000}, + {blobSize: 10_000}, + {blobSize: 50_000}, + {blobSize: 100_000}, + {blobSize: 200_000}, + {blobSize: 300_000}, + {blobSize: 400_000}, + {blobSize: 500_000}, + {blobSize: 1_000_000}, + {blobSize: 2_000_000}, + {blobSize: 3_000_000}, + {blobSize: 4_000_000}, + {blobSize: 5_000_000}, + {blobSize: 6_000_000}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("%d bytes", testCase.blobSize), func(b *testing.B) { + benchmarkDeliverTxPFB(b, testCase.blobSize) + }) + } +} + +func benchmarkDeliverTxPFB(b *testing.B, size int) { + testApp, rawTxs := generatePayForBlobTransactions(b, 1, size) + + blobTx, ok, err := blobtx.UnmarshalBlobTx(rawTxs[0]) + require.NoError(b, err) + require.True(b, ok) + + deliverTxRequest := types.RequestDeliverTx{ + Tx: blobTx.Tx, + } + + b.ResetTimer() + resp := testApp.DeliverTx(deliverTxRequest) + b.StopTimer() + require.Equal(b, uint32(0), resp.Code) + require.Equal(b, "", resp.Codespace) + b.ReportMetric(float64(resp.GasUsed), "gas_used") + b.ReportMetric(float64(len(rawTxs[0])), "transaction_size(byte)") +} + +func BenchmarkPrepareProposal_PFB_Multi(b *testing.B) { + testCases := []struct { + numberOfTransactions, blobSize int + }{ + {numberOfTransactions: 15_000, blobSize: 300}, + {numberOfTransactions: 10_000, blobSize: 500}, + {numberOfTransactions: 6_000, blobSize: 1000}, + {numberOfTransactions: 3_000, blobSize: 5000}, + {numberOfTransactions: 1_000, blobSize: 10_000}, + {numberOfTransactions: 500, blobSize: 50_000}, + {numberOfTransactions: 100, blobSize: 100_000}, + {numberOfTransactions: 100, blobSize: 200_000}, + {numberOfTransactions: 50, blobSize: 300_000}, + {numberOfTransactions: 50, blobSize: 400_000}, + {numberOfTransactions: 30, blobSize: 500_000}, + {numberOfTransactions: 10, blobSize: 1_000_000}, + {numberOfTransactions: 5, blobSize: 2_000_000}, + {numberOfTransactions: 3, blobSize: 3_000_000}, + {numberOfTransactions: 3, blobSize: 4_000_000}, + {numberOfTransactions: 2, blobSize: 5_000_000}, + {numberOfTransactions: 2, blobSize: 6_000_000}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("%d transactions of %d bytes", testCase.numberOfTransactions, testCase.blobSize), func(b *testing.B) { + benchmarkPrepareProposalPFB(b, testCase.numberOfTransactions, testCase.blobSize) + }) + } +} + +func benchmarkPrepareProposalPFB(b *testing.B, count, size int) { + testApp, rawTxs := generatePayForBlobTransactions(b, count, size) + + blockData := &tmproto.Data{ + Txs: rawTxs, + } + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: blockData, + ChainId: testApp.GetChainID(), + Height: 10, + } + + b.ResetTimer() + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + b.StopTimer() + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + b.ReportMetric(float64(b.Elapsed().Nanoseconds()), "prepare_proposal_time(ns)") + b.ReportMetric(float64(len(prepareProposalResponse.BlockData.Txs)), "number_of_transactions") + b.ReportMetric(float64(len(rawTxs[0])), "transactions_size(byte)") + b.ReportMetric(calculateBlockSizeInMb(prepareProposalResponse.BlockData.Txs), "block_size(mb)") + b.ReportMetric(float64(calculateTotalGasUsed(testApp, rawTxs)), "total_gas_used") +} + 
+func BenchmarkProcessProposal_PFB_Multi(b *testing.B) { + testCases := []struct { + numberOfTransactions, blobSize int + }{ + {numberOfTransactions: 15_000, blobSize: 300}, + {numberOfTransactions: 10_000, blobSize: 500}, + {numberOfTransactions: 6_000, blobSize: 1000}, + {numberOfTransactions: 3_000, blobSize: 5000}, + {numberOfTransactions: 1_000, blobSize: 10_000}, + {numberOfTransactions: 500, blobSize: 50_000}, + {numberOfTransactions: 100, blobSize: 100_000}, + {numberOfTransactions: 100, blobSize: 200_000}, + {numberOfTransactions: 50, blobSize: 300_000}, + {numberOfTransactions: 50, blobSize: 400_000}, + {numberOfTransactions: 30, blobSize: 500_000}, + {numberOfTransactions: 10, blobSize: 1_000_000}, + {numberOfTransactions: 5, blobSize: 2_000_000}, + {numberOfTransactions: 3, blobSize: 3_000_000}, + {numberOfTransactions: 3, blobSize: 4_000_000}, + {numberOfTransactions: 2, blobSize: 5_000_000}, + {numberOfTransactions: 2, blobSize: 6_000_000}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("%d transactions of %d bytes", testCase.numberOfTransactions, testCase.blobSize), func(b *testing.B) { + benchmarkProcessProposalPFB(b, testCase.numberOfTransactions, testCase.blobSize) + }) + } +} + +func benchmarkProcessProposalPFB(b *testing.B, count, size int) { + testApp, rawTxs := generatePayForBlobTransactions(b, count, size) + + blockData := &tmproto.Data{ + Txs: rawTxs, + } + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: blockData, + ChainId: testApp.GetChainID(), + Height: 10, + } + + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + + processProposalRequest := types.RequestProcessProposal{ + BlockData: prepareProposalResponse.BlockData, + Header: tmproto.Header{ + Height: 10, + DataHash: prepareProposalResponse.BlockData.Hash, + ChainID: testutil.ChainID, + Version: version.Consensus{ + App: testApp.AppVersion(), + }, + }, + } + + b.ResetTimer() + resp := testApp.ProcessProposal(processProposalRequest) + b.StopTimer() + require.Equal(b, types.ResponseProcessProposal_ACCEPT, resp.Result) + + b.ReportMetric(float64(b.Elapsed().Nanoseconds()), "process_proposal_time(ns)") + b.ReportMetric(float64(len(prepareProposalResponse.BlockData.Txs)), "number_of_transactions") + b.ReportMetric(float64(len(rawTxs[0])), "transactions_size(byte)") + b.ReportMetric(calculateBlockSizeInMb(prepareProposalResponse.BlockData.Txs), "block_size(mb)") + b.ReportMetric(float64(calculateTotalGasUsed(testApp, rawTxs)), "total_gas_used") +} + +func BenchmarkProcessProposal_PFB_Half_Second(b *testing.B) { + testCases := []struct { + numberOfTransactions, blobSize int + }{ + {numberOfTransactions: 11_000, blobSize: 50}, + {numberOfTransactions: 11_000, blobSize: 100}, + {numberOfTransactions: 11_000, blobSize: 200}, + {numberOfTransactions: 11_000, blobSize: 300}, + {numberOfTransactions: 11_000, blobSize: 400}, + {numberOfTransactions: 7000, blobSize: 500}, + {numberOfTransactions: 7000, blobSize: 600}, + {numberOfTransactions: 5000, blobSize: 1_000}, + {numberOfTransactions: 5000, blobSize: 1200}, + {numberOfTransactions: 5000, blobSize: 1500}, + {numberOfTransactions: 5000, blobSize: 1800}, + {numberOfTransactions: 5000, blobSize: 2000}, + } + for _, testCase := range testCases { + b.Run(fmt.Sprintf("%d transactions of %d bytes", testCase.numberOfTransactions, testCase.blobSize), func(b *testing.B) { + benchmarkProcessProposalPFBHalfSecond(b, 
testCase.numberOfTransactions, testCase.blobSize) + }) + } +} + +func benchmarkProcessProposalPFBHalfSecond(b *testing.B, count, size int) { + testApp, rawTxs := generatePayForBlobTransactions(b, count, size) + + targetTimeLowerBound := 0.499 + targetTimeUpperBound := 0.511 + + start := 0 + end := count + segment := end - start + maxIterations := 100000 + iterations := 0 + for { + iterations++ + if iterations >= maxIterations { + b.Errorf("Maximum iterations reached without achieving target processing time") + break + } + if segment == 1 { + break + } + + prepareProposalRequest := types.RequestPrepareProposal{ + BlockData: &tmproto.Data{ + Txs: rawTxs[start:end], + }, + ChainId: testApp.GetChainID(), + Height: 10, + } + prepareProposalResponse := testApp.PrepareProposal(prepareProposalRequest) + require.GreaterOrEqual(b, len(prepareProposalResponse.BlockData.Txs), 1) + + processProposalRequest := types.RequestProcessProposal{ + BlockData: prepareProposalResponse.BlockData, + Header: tmproto.Header{ + Height: 10, + DataHash: prepareProposalResponse.BlockData.Hash, + ChainID: testutil.ChainID, + Version: version.Consensus{ + App: testApp.AppVersion(), + }, + }, + } + + startTime := time.Now() + resp := testApp.ProcessProposal(processProposalRequest) + endTime := time.Now() + require.Equal(b, types.ResponseProcessProposal_ACCEPT, resp.Result) + + timeElapsed := float64(endTime.Sub(startTime).Nanoseconds()) / 1e9 + + switch { + case timeElapsed < targetTimeLowerBound: + newEnd := end + segment/2 + if newEnd > len(rawTxs) { + newEnd = len(rawTxs) + } + end = newEnd + segment = end - start + if segment <= 1 { + break + } + continue + case timeElapsed > targetTimeUpperBound: + newEnd := end / 2 + if newEnd <= start { + break + } + end = newEnd + segment = end - start + continue + default: + b.ReportMetric( + timeElapsed, + fmt.Sprintf( + "processProposalTime(s)_%d_%d_%f", + end-start, + size, + calculateBlockSizeInMb(prepareProposalResponse.BlockData.Txs[start:end]), + ), + ) + } + break + } +} + +// generatePayForBlobTransactions creates a test app then generates a number +// of valid PFB transactions. +func generatePayForBlobTransactions(b *testing.B, count int, size int) (*app.App, [][]byte) { + account := "test" + testApp, kr := testutil.SetupTestAppWithGenesisValSetAndMaxSquareSize(app.DefaultConsensusParams(), 128, account) + addr := testfactory.GetAddress(kr, account) + enc := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ acc := testutil.DirectQueryAccount(testApp, addr) + accountSequence := acc.GetSequence() + signer, err := user.NewSigner(kr, enc.TxConfig, testutil.ChainID, appconsts.LatestVersion, user.NewAccount(account, acc.GetAccountNumber(), acc.GetSequence())) + require.NoError(b, err) + + rawTxs := make([][]byte, 0, count) + randomBytes := crypto.CRandBytes(size) + blob, err := share.NewBlob(share.RandomNamespace(), randomBytes, 1, acc.GetAddress().Bytes()) + require.NoError(b, err) + for i := 0; i < count; i++ { + tx, _, err := signer.CreatePayForBlobs(account, []*share.Blob{blob}, user.SetGasLimit(2549760000), user.SetFee(10000)) + require.NoError(b, err) + rawTxs = append(rawTxs, tx) + accountSequence++ + err = signer.SetSequence(account, accountSequence) + require.NoError(b, err) + } + return testApp, rawTxs +} diff --git a/app/benchmarks/results.md b/app/benchmarks/results.md new file mode 100644 index 0000000000..9562a6a340 --- /dev/null +++ b/app/benchmarks/results.md @@ -0,0 +1,839 @@ + +# Benchmark results + +This document contains the results of the benchmarks defined under `app/benchmarks`. + +The benchmarks were run on a MacBook Pro M3 Max with 16 cores and 48GB of RAM. + +The benchmarks are run first using an in-memory DB, and then using a local DB (goleveldb). + +## In memory DB benchmarks + +### `sendMsg` benchmarks + +#### CheckTx + +A single `checkTx` of a `sendMsg` message takes 0.0003585 **ns** to execute and uses 74374 gas. + +The transactions in an `8mb` block containing 31645 `sendMsg` messages take 6.29 s (6293858682 ns) to run `checkTx` on all of them. The total gas used is 1884371034 gas. + +#### DeliverTx + +A single `deliverTx` of a `sendMsg` message takes 0.0002890 **ns** to execute and uses 103251 gas. + +The transactions in an `8mb` block containing 31645 `sendMsg` messages take 7.56 s (7564111078 ns) to run `deliverTx` on all of them. The total gas used is 2801272121 gas. + +#### PrepareProposal + +A single `prepareProposal` of a `sendMsg` message takes 0.0002801 **ns** to execute and uses 101110 gas. + +An `8mb` block containing 31645 `sendMsg` messages takes 5.04 s (5049140917 ns) to execute. The total gas used is 1843040790 gas. + +#### ProcessProposal + +A single `processProposal` of a `sendMsg` message takes 0.0002313 **ns** to execute and uses 101110 gas. + +An `8mb` block containing 31645 `sendMsg` messages takes 5.17 s (5179850250 ns) to execute. The total gas used is 1,843,040,790 gas.
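+The figures above come from the benchmarks defined under `app/benchmarks`, which are guarded by the `bench_abci_methods` build tag. A sketch of how to reproduce the single-message `sendMsg` numbers, assuming the standard `go test` flags (adjust the `-bench` regex to select other benchmarks): + +```sh +# Run only the single-message sendMsg benchmarks; skip regular tests via -run. +go test -tags bench_abci_methods -run='^$' -bench='MsgSend_1$' ./app/benchmarks/ +``` +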
+ +For the processing time of a block full of `sendMsg`, we benchmarked how much time they take depending on the number of transactions, and we have the following results: + +| Number of Transactions | ElapsedTime(s) | Number of Transactions | ElapsedTime(s) | +|------------------------|----------------|------------------------|----------------| +| 1650 | 0.2494 | 1670 | 0.2594 | +| 1690 | 0.2628 | 1739 | 0.2723 | +| 1761 | 0.2732 | 1782 | 0.2770 | +| 1856 | 0.2878 | 1878 | 0.2976 | +| 1901 | 0.2990 | 1956 | 0.3023 | +| 1980 | 0.3076 | 2004 | 0.3232 | +| 2062 | 0.3252 | 2088 | 0.3257 | +| 2112 | 0.3326 | 2138 | 0.3417 | +| 2200 | 0.3398 | 2227 | 0.3495 | +| 2254 | 0.3545 | 2319 | 0.3688 | +| 2349 | 0.3684 | 2376 | 0.3771 | +| 2475 | 0.3972 | 2505 | 0.3928 | +| 2535 | 0.4080 | 2608 | 0.4098 | +| 2641 | 0.4123 | 2673 | 0.4135 | +| 2750 | 0.4614 | 2784 | 0.4333 | +| 2817 | 0.4537 | 2851 | 0.4530 | +| 2934 | 0.4633 | 2970 | 0.4623 | +| 3006 | 0.4863 | 3093 | 0.4821 | +| 3132 | 0.4888 | 3168 | 0.4962 | +| 3207 | 0.5058 | 3300 | 0.5119 | +| 3340 | 0.5275 | 3381 | 0.5280 | +| 3478 | 0.5441 | 3523 | 0.5473 | +| 3564 | 0.5546 | 3712 | 0.5743 | +| 3757 | 0.6081 | 3802 | 0.5970 | +| 3912 | 0.6093 | 3961 | 0.6125 | +| 4009 | 0.6329 | 4125 | 0.6663 | +| 4176 | 0.6395 | 4225 | 0.6615 | +| 4276 | 0.6844 | 4401 | 0.7190 | +| 4455 | 0.6943 | 4509 | 0.7006 | +| 4639 | 0.7219 | 4698 | 0.7365 | +| 4752 | 0.7340 | 5500 | 0.8489 | + +### `PFB` benchmarks + +#### CheckTx: `BenchmarkCheckTx_PFB_Multi` + +Benchmarks of `CheckTx` for a single PFB with different sizes: + +| Benchmark Name | Time (ns/op) | Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|---------------------------------------------|--------------|----------|--------------------------|-----------------------| +| BenchmarkCheckTx_PFB_Multi/300_bytes-16 | 0.0003121 ns | 74,664 | 703 | 0.000703 MB | +| BenchmarkCheckTx_PFB_Multi/500_bytes-16 | 0.0003392 ns | 74,664 | 903 | 0.000903 MB | +| BenchmarkCheckTx_PFB_Multi/1000_bytes-16 | 0.0002797 ns | 74,664 | 1,403 | 0.001403 MB | +| BenchmarkCheckTx_PFB_Multi/5000_bytes-16 | 0.0002818 ns | 74,664 | 5,403 | 0.005403 MB | +| BenchmarkCheckTx_PFB_Multi/10000_bytes-16 | 0.0003094 ns | 74,664 | 10,403 | 0.010403 MB | +| BenchmarkCheckTx_PFB_Multi/50000_bytes-16 | 0.0004127 ns | 74,674 | 50,406 | 0.050406 MB | +| BenchmarkCheckTx_PFB_Multi/100000_bytes-16 | 0.0004789 ns | 74,674 | 100,406 | 0.100406 MB | +| BenchmarkCheckTx_PFB_Multi/200000_bytes-16 | 0.0006958 ns | 74,674 | 200,406 | 0.200406 MB | +| BenchmarkCheckTx_PFB_Multi/300000_bytes-16 | 0.0008678 ns | 74,674 | 300,406 | 0.300406 MB | +| BenchmarkCheckTx_PFB_Multi/400000_bytes-16 | 0.001076 ns | 74,674 | 400,406 | 0.400406 MB | +| BenchmarkCheckTx_PFB_Multi/500000_bytes-16 | 0.001307 ns | 74,674 | 500,406 | 0.500406 MB | +| BenchmarkCheckTx_PFB_Multi/1000000_bytes-16 | 0.002291 ns | 74,674 | 1,000,406 | 1.000406 MB | +| BenchmarkCheckTx_PFB_Multi/2000000_bytes-16 | 0.005049 ns | 74,674 | 2,000,406 | 2.000406 MB | +| BenchmarkCheckTx_PFB_Multi/3000000_bytes-16 | 0.006911 ns | 74,684 | 3,000,409 | 3.000409 MB | +| BenchmarkCheckTx_PFB_Multi/4000000_bytes-16 | 0.008246 ns | 74,684 | 4,000,409 | 4.000409 MB | +| BenchmarkCheckTx_PFB_Multi/5000000_bytes-16 | 0.01127 ns | 74,684 | 5,000,409 | 5.000409 MB | +| BenchmarkCheckTx_PFB_Multi/6000000_bytes-16 | 0.01316 ns | 74,684 | 6,000,409 | 6.000409 MB | + +#### DeliverTx: `BenchmarkDeliverTx_PFB_Multi` + +Benchmarks of `DeliverTx` for a single PFB with different sizes: + +| Benchmark Name | Time 
(ns/op) | Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|-----------------------------------------------|--------------|------------|--------------------------|-----------------------| +| BenchmarkDeliverTx_PFB_Multi/300_bytes-16 | 0.0002718 ns | 77,682 | 703 | 0.000703 MB | +| BenchmarkDeliverTx_PFB_Multi/500_bytes-16 | 0.0002574 ns | 81,778 | 903 | 0.000903 MB | +| BenchmarkDeliverTx_PFB_Multi/1000_bytes-16 | 0.0002509 ns | 85,874 | 1,403 | 0.001403 MB | +| BenchmarkDeliverTx_PFB_Multi/5000_bytes-16 | 0.0002755 ns | 118,642 | 5,403 | 0.005403 MB | +| BenchmarkDeliverTx_PFB_Multi/10000_bytes-16 | 0.0002726 ns | 159,602 | 10,403 | 0.010403 MB | +| BenchmarkDeliverTx_PFB_Multi/50000_bytes-16 | 0.0002795 ns | 499,580 | 50,406 | 0.050406 MB | +| BenchmarkDeliverTx_PFB_Multi/100000_bytes-16 | 0.0002488 ns | 925,564 | 100,406 | 0.100406 MB | +| BenchmarkDeliverTx_PFB_Multi/200000_bytes-16 | 0.0002487 ns | 1,773,436 | 200,406 | 0.200406 MB | +| BenchmarkDeliverTx_PFB_Multi/300000_bytes-16 | 0.0002887 ns | 2,625,404 | 300,406 | 0.300406 MB | +| BenchmarkDeliverTx_PFB_Multi/400000_bytes-16 | 0.0002810 ns | 3,473,276 | 400,406 | 0.400406 MB | +| BenchmarkDeliverTx_PFB_Multi/500000_bytes-16 | 0.0002616 ns | 4,325,244 | 500,406 | 0.500406 MB | +| BenchmarkDeliverTx_PFB_Multi/1000000_bytes-16 | 0.0003983 ns | 8,572,796 | 1,000,406 | 1.000406 MB | +| BenchmarkDeliverTx_PFB_Multi/2000000_bytes-16 | 0.0003368 ns | 17,071,996 | 2,000,406 | 2.000406 MB | +| BenchmarkDeliverTx_PFB_Multi/3000000_bytes-16 | 0.0005770 ns | 25,571,206 | 3,000,409 | 3.000409 MB | +| BenchmarkDeliverTx_PFB_Multi/4000000_bytes-16 | 0.0003752 ns | 34,066,310 | 4,000,409 | 4.000409 MB | +| BenchmarkDeliverTx_PFB_Multi/5000000_bytes-16 | 0.0003788 ns | 42,565,510 | 5,000,409 | 5.000409 MB | +| BenchmarkDeliverTx_PFB_Multi/6000000_bytes-16 | 0.0003975 ns | 51,064,710 | 6,000,409 | 6.000409 MB | + +#### PrepareProposal: `BenchmarkPrepareProposal_PFB_Multi` + +The benchmarks for `PrepareProposal` for 8mb blocks containing PFBs of different sizes: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Prepare Proposal Time (s) | Total Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|------------------------------------------------------------------------|-----------------|------------------------|---------------------------|-----------------|--------------------------|-----------------------| +| BenchmarkPrepareProposal_PFB_Multi/15000_transactions_of_300_bytes-16 | 6.239 | 10,318 | 2.411 s | 988,490,895,000 | 703 | 0.000703 MB | +| BenchmarkPrepareProposal_PFB_Multi/10000_transactions_of_500_bytes-16 | 5.035 | 6,331 | 1.710 s | 439,343,930,000 | 903 | 0.000903 MB | +| BenchmarkPrepareProposal_PFB_Multi/6000_transactions_of_1000_bytes-16 | 5.809 | 4,566 | 1.033 s | 158,174,358,000 | 1,403 | 0.001403 MB | +| BenchmarkPrepareProposal_PFB_Multi/3000_transactions_of_5000_bytes-16 | 7.188 | 1,413 | 0.547 s | 39,550,179,000 | 5,403 | 0.005403 MB | +| BenchmarkPrepareProposal_PFB_Multi/1000_transactions_of_10000_bytes-16 | 7.470 | 758 | 0.210 s | 4,397,393,000 | 10,403 | 0.010403 MB | +| BenchmarkPrepareProposal_PFB_Multi/500_transactions_of_50000_bytes-16 | 7.441 | 155 | 0.127 s | 1,100,446,500 | 50,406 | 0.050406 MB | +| BenchmarkPrepareProposal_PFB_Multi/100_transactions_of_100000_bytes-16 | 7.368 | 77 | 0.045 s | 44,369,300 | 100,406 | 0.100406 MB | +| BenchmarkPrepareProposal_PFB_Multi/100_transactions_of_200000_bytes-16 | 7.260 | 38 | 0.059 s | 44,369,300 | 200,406 | 0.200406 MB | +| 
BenchmarkPrepareProposal_PFB_Multi/50_transactions_of_300000_bytes-16 | 7.161 | 25 | 0.056 s | 11,202,150 | 300,406 | 0.300406 MB | +| BenchmarkPrepareProposal_PFB_Multi/50_transactions_of_400000_bytes-16 | 7.254 | 19 | 0.054 s | 11,202,150 | 400,406 | 0.400406 MB | +| BenchmarkPrepareProposal_PFB_Multi/30_transactions_of_500000_bytes-16 | 7.157 | 15 | 0.041 s | 4,085,490 | 500,406 | 0.500406 MB | +| BenchmarkPrepareProposal_PFB_Multi/10_transactions_of_1000000_bytes-16 | 6.678 | 7 | 0.031 s | 483,230 | 1,000,406 | 1.000406 MB | +| BenchmarkPrepareProposal_PFB_Multi/5_transactions_of_2000000_bytes-16 | 5.723 | 3 | 0.032 s | 131,790 | 2,000,406 | 2.000406 MB | +| BenchmarkPrepareProposal_PFB_Multi/3_transactions_of_3000000_bytes-16 | 5.723 | 2 | 0.042 s | 52,716 | 3,000,409 | 3.000409 MB | +| BenchmarkPrepareProposal_PFB_Multi/3_transactions_of_4000000_bytes-16 | 3.815 | 1 | 0.040 s | 52,716 | 4,000,409 | 4.000409 MB | +| BenchmarkPrepareProposal_PFB_Multi/2_transactions_of_5000000_bytes-16 | 4.769 | 1 | 0.039 s | 26,358 | 5,000,409 | 5.000409 MB | +| BenchmarkPrepareProposal_PFB_Multi/2_transactions_of_6000000_bytes-16 | 5.722 | 1 | 0.032 s | 26,358 | 6,000,409 | 6.000409 MB | + +#### ProcessProposal: `BenchmarkProcessProposal_PFB_Multi` + +The benchmarks for `ProcessProposal` for 8mb blocks containing PFBs of different sizes: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Process Proposal Time (s) | Total Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|------------------------------------------------------------------------|-----------------|------------------------|---------------------------|-----------------|--------------------------|-----------------------| +| BenchmarkProcessProposal_PFB_Multi/15000_transactions_of_300_bytes-16 | 6.239 | 10,318 | 1.767 s | 988,490,895,000 | 703 | 0.000703 MB | +| BenchmarkProcessProposal_PFB_Multi/10000_transactions_of_500_bytes-16 | 5.035 | 6,331 | 1.101 s | 439,343,930,000 | 903 | 0.000903 MB | +| BenchmarkProcessProposal_PFB_Multi/6000_transactions_of_1000_bytes-16 | 5.809 | 4,566 | 0.820 s | 158,174,358,000 | 1,403 | 0.001403 MB | +| BenchmarkProcessProposal_PFB_Multi/3000_transactions_of_5000_bytes-16 | 7.188 | 1,413 | 0.300 s | 39,550,179,000 | 5,403 | 0.005403 MB | +| BenchmarkProcessProposal_PFB_Multi/1000_transactions_of_10000_bytes-16 | 7.470 | 758 | 0.185 s | 4,397,393,000 | 10,403 | 0.010403 MB | +| BenchmarkProcessProposal_PFB_Multi/500_transactions_of_50000_bytes-16 | 7.441 | 155 | 0.092 s | 1,100,446,500 | 50,406 | 0.050406 MB | +| BenchmarkProcessProposal_PFB_Multi/100_transactions_of_100000_bytes-16 | 7.368 | 77 | 0.089 s | 44,369,300 | 100,406 | 0.100406 MB | +| BenchmarkProcessProposal_PFB_Multi/100_transactions_of_200000_bytes-16 | 7.260 | 38 | 0.060 s | 44,369,300 | 200,406 | 0.200406 MB | +| BenchmarkProcessProposal_PFB_Multi/50_transactions_of_300000_bytes-16 | 7.161 | 25 | 0.048 s | 11,202,150 | 300,406 | 0.300406 MB | +| BenchmarkProcessProposal_PFB_Multi/50_transactions_of_400000_bytes-16 | 7.254 | 19 | 0.051 s | 11,202,150 | 400,406 | 0.400406 MB | +| BenchmarkProcessProposal_PFB_Multi/30_transactions_of_500000_bytes-16 | 7.157 | 15 | 0.062 s | 4,085,490 | 500,406 | 0.500406 MB | +| BenchmarkProcessProposal_PFB_Multi/10_transactions_of_1000000_bytes-16 | 6.678 | 7 | 0.047 s | 483,230 | 1,000,406 | 1.000406 MB | +| BenchmarkProcessProposal_PFB_Multi/5_transactions_of_2000000_bytes-16 | 5.723 | 3 | 0.043 s | 131,790 | 2,000,406 | 2.000406 MB | +| 
BenchmarkProcessProposal_PFB_Multi/3_transactions_of_3000000_bytes-16 | 5.723 | 2 | 0.053 s | 52,716 | 3,000,409 | 3.000409 MB | +| BenchmarkProcessProposal_PFB_Multi/3_transactions_of_4000000_bytes-16 | 3.815 | 1 | 0.047 s | 52,716 | 4,000,409 | 4.000409 MB | +| BenchmarkProcessProposal_PFB_Multi/2_transactions_of_5000000_bytes-16 | 4.769 | 1 | 0.068 s | 26,358 | 5,000,409 | 5.000409 MB | +| BenchmarkProcessProposal_PFB_Multi/2_transactions_of_6000000_bytes-16 | 5.722 | 1 | 0.047 s | 26,358 | 6,000,409 | 6.000409 MB | + +### IBC `UpdateClient` benchmarks + +#### CheckTx: `BenchmarkIBC_CheckTx_Update_Client_Multi` + +The benchmarks of executing `checkTx` on a single transaction containing an IBC `updateClient` with different numbers of required signatures: + +| Benchmark Name | Time (ns/op) | Gas Used | Number of Validators | Number of Verified Signatures | Transaction Size (Bytes) | Transaction Size (MB) | +|-----------------------------------------------------------------------|--------------|-----------|----------------------|-------------------------------|--------------------------|-----------------------| +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_2-16 | 0.0007940 ns | 108,598 | 2.0 | 1.0 | 1,396 | 0.001396 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_10-16 | 0.002127 ns | 127,710 | 10.0 | 6.0 | 3,303 | 0.003303 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_25-16 | 0.003694 ns | 163,430 | 25.0 | 16.0 | 6,875 | 0.006875 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_50-16 | 0.004701 ns | 222,930 | 50.0 | 33.0 | 12,825 | 0.012825 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_75-16 | 0.004095 ns | 282,480 | 75.0 | 50.0 | 18,780 | 0.018780 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_100-16 | 0.004112 ns | 340,928 | 100.0 | 66.0 | 24,629 | 0.024629 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_125-16 | 0.007009 ns | 400,178 | 125.0 | 83.0 | 30,554 | 0.030554 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_150-16 | 0.004906 ns | 460,980 | 150.0 | 100.0 | 36,630 | 0.036630 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_175-16 | 0.01056 ns | 520,500 | 175.0 | 116.0 | 42,582 | 0.042582 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_200-16 | 0.01181 ns | 580,000 | 200.0 | 133.0 | 48,532 | 0.048532 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_225-16 | 0.01339 ns | 637,198 | 225.0 | 150.0 | 54,256 | 0.054256 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_250-16 | 0.01411 ns | 699,020 | 250.0 | 166.0 | 60,434 | 0.060434 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_300-16 | 0.01931 ns | 818,020 | 300.0 | 200.0 | 72,334 | 0.072334 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_400-16 | 0.02312 ns | 1,056,020 | 400.0 | 266.0 | 96,134 | 0.096134 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_500-16 | 0.01675 ns | 1,288,968 | 500.0 | 333.0 | 119,433 | 0.119433 MB | + +#### DeliverTx: `BenchmarkIBC_DeliverTx_Update_Client_Multi` + +The benchmarks of executing `deliverTx` on a single transaction containing an IBC `updateClient` with different numbers of required signatures: + +| Benchmark Name | Time (ns/op) | Gas Used | Number of Validators | Number of Verified Signatures | Transaction Size (Bytes) | Transaction Size (MB) | 
+|-------------------------------------------------------------------------|--------------|-----------|----------------------|-------------------------------|--------------------------|-----------------------| +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_2-16 | 0.0006931 ns | 107,520 | 2.0 | 1.0 | 1,396 | 0.001396 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_10-16 | 0.004647 ns | 126,480 | 10.0 | 6.0 | 3,292 | 0.003292 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_25-16 | 0.005861 ns | 162,352 | 25.0 | 16.0 | 6,875 | 0.006875 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_50-16 | 0.009248 ns | 221,852 | 50.0 | 33.0 | 12,825 | 0.012825 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_75-16 | 0.01252 ns | 281,402 | 75.0 | 50.0 | 18,780 | 0.018780 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_100-16 | 0.01239 ns | 339,850 | 100.0 | 66.0 | 24,629 | 0.024629 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_125-16 | 0.01300 ns | 400,402 | 125.0 | 83.0 | 30,680 | 0.030680 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_150-16 | 0.01691 ns | 459,902 | 150.0 | 100.0 | 36,630 | 0.036630 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_175-16 | 0.01560 ns | 517,620 | 175.0 | 116.0 | 42,406 | 0.042406 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_200-16 | 0.01894 ns | 578,922 | 200.0 | 133.0 | 48,532 | 0.048532 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_225-16 | 0.01714 ns | 638,422 | 225.0 | 150.0 | 54,482 | 0.054482 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_250-16 | 0.01736 ns | 697,942 | 250.0 | 166.0 | 60,434 | 0.060434 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_300-16 | 0.02008 ns | 816,942 | 300.0 | 200.0 | 72,334 | 0.072334 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_400-16 | 0.02320 ns | 1,054,942 | 400.0 | 266.0 | 96,134 | 0.096134 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_500-16 | 0.02724 ns | 1,288,522 | 500.0 | 333.0 | 119,492 | 0.119492 MB | + +#### PrepareProposal: `BenchmarkIBC_PrepareProposal_Update_Client_Multi` + +Benchmarks of an `8mb` containing the maximum number of IBC `UpdateClient` with different number of signatures: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Number of Validators | Number of Verified Signatures | Prepare Proposal Time (s) | Total Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|-------------------------------------------------------------------------------|------------------|-------------------------|----------------------|-------------------------------|-----------------------------|------------------|----------------------------|------------------------| +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_10-16 | 7.464 | 2,367 | 10.0 | 6.0 | 0.571 s | 266,926,655 | 3,373 | 0.003373 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_25-16 | 7.465 | 1,138 | 25.0 | 16.0 | 0.436 s | 249,391,655 | 6,945 | 0.006945 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_50-16 | 7.462 | 610.0 | 50.0 | 33.0 | 0.271 s | 184,196,655 | 12,895 | 0.012895 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_75-16 | 7.452 | 416.0 | 75.0 | 
50.0 | 0.181 s | 121,879,155 | 18,850 | 0.018850 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_100-16 | 7.453 | 316.0 | 100.0 | 66.0 | 0.180 s | 151,629,155 | 24,800 | 0.024800 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_125-16 | 7.462 | 255.0 | 125.0 | 83.0 | 0.197 s | 181,379,155 | 30,750 | 0.030750 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_150-16 | 7.441 | 213.0 | 150.0 | 100.0 | 0.207 s | 211,129,155 | 36,700 | 0.036700 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_175-16 | 7.432 | 183.0 | 175.0 | 116.0 | 0.215 s | 240,889,155 | 42,652 | 0.042652 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_200-16 | 7.467 | 162.0 | 200.0 | 133.0 | 0.227 s | 269,634,155 | 48,401 | 0.048401 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_225-16 | 7.451 | 144.0 | 225.0 | 150.0 | 0.235 s | 299,259,155 | 54,326 | 0.054326 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_250-16 | 7.462 | 130.0 | 250.0 | 166.0 | 0.242 s | 328,894,155 | 60,253 | 0.060253 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_300-16 | 7.450 | 108.0 | 300.0 | 200.0 | 0.270 s | 389,649,155 | 72,404 | 0.072404 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_400-16 | 7.426 | 81.0 | 400.0 | 266.0 | 0.304 s | 508,649,155 | 96,204 | 0.096204 MB | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_500-16 | 7.404 | 65.0 | 500.0 | 333.0 | 0.361 s | 625,144,155 | 119,503 | 0.119503 MB | + +#### ProcessProposal: `BenchmarkIBC_ProcessProposal_Update_Client_Multi` + +Benchmarks of an `8mb` containing the maximum number of IBC `UpdateClient` with different number of signatures: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Number of Validators | Number of Verified Signatures | Process Proposal Time (s) | Total Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|-------------------------------------------------------------------------------|-----------------|------------------------|----------------------|-------------------------------|---------------------------|----------------|--------------------------|-----------------------| +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_2-16 | 7.457 | 5,574 | 2.0 | 1.0 | 1.022 s | 419,611,655 | 1,469 | 0.001469 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_10-16 | 7.464 | 2,367 | 10.0 | 6.0 | 0.455 s | 266,926,655 | 3,373 | 0.003373 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_25-16 | 7.465 | 1,138 | 25.0 | 16.0 | 0.270 s | 249,391,655 | 6,945 | 0.006945 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_50-16 | 7.462 | 610.0 | 50.0 | 33.0 | 0.181 s | 184,196,655 | 12,895 | 0.012895 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_75-16 | 7.452 | 416.0 | 75.0 | 50.0 | 0.150 s | 121,879,155 | 18,850 | 0.018850 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_100-16 | 7.453 | 316.0 | 100.0 | 66.0 | 0.132 s | 151,629,155 | 24,800 | 0.024800 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_125-16 | 7.462 | 255.0 | 125.0 | 83.0 | 0.122 s | 181,379,155 | 30,750 | 0.030750 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_150-16 | 7.441 
| 213.0 | 150.0 | 100.0 | 0.107 s | 211,129,155 | 36,700 | 0.036700 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_175-16 | 7.442 | 184.0 | 175.0 | 116.0 | 0.092 s | 240,009,155 | 42,476 | 0.042476 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_200-16 | 7.452 | 161.0 | 200.0 | 133.0 | 0.098 s | 270,639,155 | 48,602 | 0.048602 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_225-16 | 7.430 | 143.0 | 225.0 | 150.0 | 0.089 s | 300,389,155 | 54,552 | 0.054552 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_250-16 | 7.435 | 129.0 | 250.0 | 166.0 | 0.081 s | 330,149,155 | 60,504 | 0.060504 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_300-16 | 7.450 | 108.0 | 300.0 | 200.0 | 0.078 s | 389,649,155 | 72,404 | 0.072404 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_400-16 | 7.426 | 81.0 | 400.0 | 266.0 | 0.077 s | 508,649,155 | 96,204 | 0.096204 MB | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_500-16 | 7.435 | 65.0 | 500.0 | 333.0 | 0.092 s | 627,649,155 | 120,004 | 0.120004 MB | + +#### Process proposal time with different number of transactions per block + +**50 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1467 | 0.532979 | 0.2508 | +| 1546 | 0.561684 | 0.2766 | +| 1566 | 0.568951 | 0.2511 | +| 1650 | 0.599472 | 0.2711 | +| 1739 | 0.631810 | 0.3007 | +| 1761 | 0.639804 | 0.2832 | +| 1856 | 0.674322 | 0.3017 | +| 1956 | 0.710657 | 0.3203 | +| 1980 | 0.719378 | 0.3291 | +| 2062 | 0.749172 | 0.3670 | +| 2088 | 0.758619 | 0.3426 | +| 2200 | 0.799314 | 0.3610 | +| 2319 | 0.842553 | 0.3980 | +| 2349 | 0.853454 | 0.3794 | +| 2475 | 0.899236 | 0.4086 | +| 2608 | 0.947561 | 0.4555 | +| 2641 | 0.959552 | 0.4561 | +| 2750 | 0.999157 | 0.4920 | +| 2784 | 1.011511 | 0.4782 | +| 2934 | 1.066013 | 0.5209 | +| 2970 | 1.079094 | 0.5069 | +| 3093 | 1.123786 | 0.5816 | +| 3132 | 1.137957 | 0.5360 | +| 3300 | 1.198999 | 0.5766 | +| 3478 | 1.263676 | 0.6072 | +| 3523 | 1.280026 | 0.6028 | +| 3712 | 1.348700 | 0.6394 | +| 3912 | 1.421370 | 0.6928 | +| 3961 | 1.439174 | 0.6559 | +| 4125 | 1.498763 | 0.7463 | +| 4176 | 1.517294 | 0.6967 | +| 5500 | 1.998369 | 0.9183 | +| 11000 | 3.753713 | 1.732 | + +**100 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1546 | 0.636877 | 0.2726 | +| 1739 | 0.716391 | 0.2762 | +| 1956 | 0.805792 | 0.3207 | +| 2062 | 0.849463 | 0.3361 | +| 2319 | 0.955343 | 0.3774 | +| 2608 | 1.074408 | 0.4387 | +| 2750 | 1.132910 | 0.4873 | +| 2934 | 1.208715 | 0.5015 | +| 3093 | 1.274221 | 0.5202 | +| 3478 | 1.432837 | 0.5797 | +| 3912 | 1.611639 | 0.6520 | +| 4125 | 1.699392 | 0.6758 | +| 5500 | 2.265875 | 0.9318 | +| 11000 | 4.256186 | 1.685 | + +**200 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1546 | 0.787264 | 0.2472 | +| 1739 | 0.885551 | 0.3009 | +| 1956 | 0.996061 | 0.3188 | +| 2062 | 1.050043 | 0.3400 | +| 2319 | 1.180923 | 0.3781 | +| 2608 | 1.328100 | 0.4439 | +| 2750 | 1.400415 | 0.4720 | +| 2934 | 1.494120 | 0.5049 | +| 3093 | 1.575092 | 0.5384 | +| 3478 | 1.771158 | 0.5913 | +| 3912 | 1.992178 | 0.6459 | +| 4125 | 2.100651 | 0.6927 | +| 5500 | 
2.800886 | 0.8970 | +| 11000 | 5.254511 | 1.691 | + +**300 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1546 | 0.934702 | 0.2506 | +| 1739 | 1.051395 | 0.2910 | +| 1956 | 1.182600 | 0.3316 | +| 2062 | 1.246691 | 0.3439 | +| 2319 | 1.402081 | 0.3830 | +| 2608 | 1.576818 | 0.4674 | +| 2750 | 1.662676 | 0.4803 | +| 2934 | 1.773928 | 0.5110 | +| 3093 | 1.870064 | 0.5431 | +| 3478 | 2.102846 | 0.6002 | +| 3912 | 2.365255 | 0.6659 | +| 4125 | 2.494041 | 0.7052 | +| 5500 | 3.325407 | 0.9117 | +| 11000 | 6.238512 | 1.688 | + +**400 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1375 | 0.962440 | 0.2425 | +| 1467 | 1.026840 | 0.2564 | +| 1546 | 1.082140 | 0.2583 | +| 1650 | 1.154940 | 0.2713 | +| 1739 | 1.217239 | 0.2854 | +| 1856 | 1.299139 | 0.3204 | +| 1956 | 1.369139 | 0.3205 | +| 2062 | 1.443338 | 0.3535 | +| 2200 | 1.539938 | 0.3674 | +| 2319 | 1.623238 | 0.3873 | +| 2475 | 1.732437 | 0.4184 | +| 2608 | 1.825537 | 0.4635 | +| 2750 | 1.924936 | 0.5227 | +| 2784 | 1.948736 | 0.5029 | +| 2934 | 2.053736 | 0.5193 | +| 3093 | 2.165035 | 0.5505 | +| 3300 | 2.309935 | 0.6121 | +| 3478 | 2.434534 | 0.6077 | +| 3712 | 2.598333 | 0.6534 | +| 3912 | 2.738333 | 0.6625 | +| 5500 | 3.849928 | 0.9410 | +| 11000 | 7.222513 | 1.782 | + +**500 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1476 | 1.173903 | 0.2640 | +| 1660 | 1.320250 | 0.3192 | +| 1750 | 1.391832 | 0.3249 | +| 1867 | 1.484890 | 0.3494 | +| 1968 | 1.565222 | 0.3664 | +| 2214 | 1.760881 | 0.4322 | +| 2490 | 1.980402 | 0.4667 | +| 2625 | 2.087776 | 0.4795 | +| 2800 | 2.226965 | 0.5033 | +| 2952 | 2.347860 | 0.5529 | +| 3321 | 2.641350 | 0.6263 | +| 3500 | 2.783720 | 0.6101 | +| 3735 | 2.970631 | 0.6629 | +| 3937 | 3.131294 | 0.7341 | +| 7000 | 5.035397 | 1.127 | + +**600 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1400 | 1.246969 | 0.2492 | +| 1417 | 1.262112 | 0.2554 | +| 1432 | 1.275473 | 0.2465 | +| 1476 | 1.314665 | 0.2575 | +| 1494 | 1.330698 | 0.2716 | +| 1510 | 1.344950 | 0.2729 | +| 1575 | 1.402847 | 0.2777 | +| 1593 | 1.418880 | 0.3210 | +| 1611 | 1.434914 | 0.3269 | +| 1660 | 1.478559 | 0.3331 | +| 1680 | 1.496374 | 0.3202 | +| 1698 | 1.512407 | 0.3387 | +| 1750 | 1.558725 | 0.3430 | +| 1771 | 1.577431 | 0.3476 | +| 1791 | 1.595245 | 0.3550 | +| 1812 | 1.613951 | 0.3526 | +| 1867 | 1.662941 | 0.3702 | +| 1890 | 1.683428 | 0.3592 | +| 1910 | 1.701242 | 0.3728 | +| 1968 | 1.752905 | 0.3790 | +| 1992 | 1.774282 | 0.3636 | +| 2014 | 1.793879 | 0.3740 | +| 2100 | 1.870481 | 0.4125 | +| 2125 | 1.892750 | 0.3915 | +| 2148 | 1.913237 | 0.4158 | +| 2214 | 1.972025 | 0.4057 | +| 2241 | 1.996075 | 0.4231 | +| 2265 | 2.017452 | 0.4210 | +| 2362 | 2.103853 | 0.4392 | +| 2389 | 2.127903 | 0.4406 | +| 2416 | 2.151953 | 0.4700 | +| 2490 | 2.217867 | 0.4615 | +| 2520 | 2.244589 | 0.4727 | +| 2547 | 2.268639 | 0.4743 | +| 2625 | 2.338116 | 0.4812 | +| 2656 | 2.365728 | 0.4923 | +| 2686 | 2.392450 | 0.4905 | +| 2718 | 2.420954 | 0.5042 | +| 2800 | 2.493994 | 0.5309 | +| 2835 | 2.525169 | 0.5166 | +| 2865 | 2.551891 | 0.5340 | +| 2952 | 2.629385 | 0.5378 | +| 2988 | 2.661451 | 0.5504 | +| 3021 | 2.690845 | 0.5532 | +| 3150 
| 2.805750 | 0.5948 | +| 3187 | 2.838707 | 0.5747 | +| 3222 | 2.869883 | 0.5986 | +| 3321 | 2.958065 | 0.6170 | +| 3361 | 2.993694 | 0.6092 | +| 3397 | 3.025761 | 0.6193 | +| 3500 | 3.117506 | 0.6357 | +| 3543 | 3.155807 | 0.6425 | +| 3583 | 3.191437 | 0.6764 | +| 3624 | 3.227957 | 0.6628 | +| 3735 | 3.326828 | 0.6819 | +| 3780 | 3.366911 | 0.6935 | +| 3820 | 3.402540 | 0.7127 | +| 3937 | 3.506756 | 0.7093 | +| 3984 | 3.548620 | 0.7404 | +| 4029 | 3.588703 | 0.7535 | +| 7000 | 5.639168 | 1.133 | + +**1000 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1333 | 1.695789 | 0.2682 | +| 1348 | 1.714872 | 0.2605 | +| 1406 | 1.788660 | 0.2858 | +| 1422 | 1.809015 | 0.2827 | +| 1437 | 1.828098 | 0.2881 | +| 1499 | 1.906975 | 0.2945 | +| 1516 | 1.928602 | 0.2985 | +| 1581 | 2.011295 | 0.3039 | +| 1599 | 2.034195 | 0.3111 | +| 1616 | 2.055822 | 0.3185 | +| 1686 | 2.144876 | 0.3450 | +| 1705 | 2.169048 | 0.3501 | +| 1778 | 2.261919 | 0.3496 | +| 1798 | 2.287363 | 0.3554 | +| 1818 | 2.312807 | 0.3507 | +| 1875 | 2.385323 | 0.3849 | +| 1896 | 2.412039 | 0.3877 | +| 1917 | 2.438755 | 0.3746 | +| 1999 | 2.543076 | 0.3815 | +| 2022 | 2.572336 | 0.4042 | +| 2109 | 2.683018 | 0.4223 | +| 2133 | 2.713551 | 0.4126 | +| 2155 | 2.741539 | 0.4115 | +| 2248 | 2.859854 | 0.4183 | +| 2274 | 2.892931 | 0.4343 | +| 2371 | 3.016335 | 0.4642 | +| 2398 | 3.050684 | 0.4631 | +| 2424 | 3.083761 | 0.4575 | +| 2500 | 3.180449 | 0.4825 | +| 2529 | 3.217342 | 0.4757 | +| 2557 | 3.252964 | 0.4812 | +| 2667 | 3.392906 | 0.5144 | +| 2697 | 3.431072 | 0.5141 | +| 2727 | 3.469238 | 0.5071 | +| 2812 | 3.577375 | 0.5250 | +| 2844 | 3.618086 | 0.5359 | +| 2875 | 3.657524 | 0.5506 | +| 2998 | 3.814005 | 0.5659 | +| 3033 | 3.858532 | 0.5797 | +| 3163 | 4.023918 | 0.5964 | +| 3199 | 4.069717 | 0.6023 | +| 3232 | 4.111700 | 0.6142 | +| 3372 | 4.289808 | 0.6249 | +| 3411 | 4.339424 | 0.6465 | +| 3556 | 4.523893 | 0.6488 | +| 3597 | 4.576054 | 0.6829 | +| 3636 | 4.625669 | 0.6699 | +| 3750 | 4.770700 | 0.6820 | +| 3793 | 4.825405 | 0.6983 | +| 3835 | 4.878838 | 0.6991 | +| 5000 | 5.808817 | 0.8490 | + +**1200 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1406 | 2.056833 | 0.2758 | +| 1500 | 2.194349 | 0.3071 | +| 1581 | 2.312847 | 0.3054 | +| 1687 | 2.467918 | 0.3332 | +| 1778 | 2.601046 | 0.3569 | +| 1875 | 2.742950 | 0.3688 | +| 2000 | 2.925817 | 0.3793 | +| 2109 | 3.085278 | 0.4087 | +| 2250 | 3.291552 | 0.4359 | +| 2371 | 3.468567 | 0.4462 | +| 2500 | 3.657286 | 0.4789 | +| 2530 | 3.701174 | 0.4999 | +| 2667 | 3.901596 | 0.4836 | +| 2812 | 4.113722 | 0.5371 | +| 3000 | 4.388754 | 0.5768 | +| 3163 | 4.627213 | 0.5897 | +| 3375 | 4.937355 | 0.6156 | +| 3556 | 5.202147 | 0.6549 | +| 3750 | 5.485956 | 0.6933 | +| 4000 | 5.851690 | 0.7415 | +| 5000 | 6.679712 | 0.8498 | + +**1500 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1406 | 2.459093 | 0.2941 | +| 1581 | 2.765175 | 0.3109 | +| 1778 | 3.109735 | 0.3373 | +| 1875 | 3.279392 | 0.3706 | +| 2109 | 3.688667 | 0.4100 | +| 2371 | 4.146915 | 0.4601 | +| 2500 | 4.372541 | 0.4735 | +| 2667 | 4.664631 | 0.5013 | +| 2812 | 4.918242 | 0.5260 | +| 3163 | 5.532154 | 0.5946 | +| 3556 | 6.219526 | 0.6634 | +| 3750 | 6.245762 | 0.6879 | +| 5000 | 6.245762 | 0.6781 | + +**1800 
bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1333 | 2.712788 | 0.2643 | +| 1406 | 2.861353 | 0.2840 | +| 1422 | 2.893915 | 0.2843 | +| 1499 | 3.050621 | 0.2956 | +| 1581 | 3.217503 | 0.3094 | +| 1599 | 3.254135 | 0.3302 | +| 1686 | 3.431192 | 0.3396 | +| 1778 | 3.618425 | 0.3407 | +| 1798 | 3.659128 | 0.3397 | +| 1875 | 3.815834 | 0.3777 | +| 1896 | 3.858572 | 0.3813 | +| 1999 | 4.068192 | 0.3647 | +| 2109 | 4.292057 | 0.4191 | +| 2133 | 4.340900 | 0.4057 | +| 2248 | 4.574942 | 0.4349 | +| 2371 | 4.825264 | 0.4446 | +| 2398 | 4.880213 | 0.4481 | +| 2500 | 5.087797 | 0.4676 | +| 2529 | 5.146816 | 0.4740 | +| 2667 | 5.427666 | 0.5127 | +| 2697 | 5.488720 | 0.5039 | +| 2812 | 5.722761 | 0.5547 | +| 2844 | 5.787886 | 0.5411 | +| 2998 | 6.101297 | 0.5710 | +| 3163 | 6.437096 | 0.5896 | +| 3199 | 6.510361 | 0.5965 | +| 3372 | 6.862440 | 0.6149 | +| 3556 | 7.236906 | 0.6572 | +| 3597 | 7.267433 | 0.6716 | +| 5000 | 7.267433 | 0.6742 | + +**2000 bytes blobs**: + +| Number of Transactions | Block Size (bytes) | Elapsed Time (s) | +|------------------------|--------------------|------------------| +| 1406 | 3.129526 | 0.2732 | +| 1581 | 3.519054 | 0.3078 | +| 1778 | 3.957552 | 0.3477 | +| 1875 | 4.173462 | 0.3764 | +| 2109 | 4.694317 | 0.4059 | +| 2371 | 5.277496 | 0.4412 | +| 2500 | 5.564634 | 0.4664 | +| 2667 | 5.936356 | 0.5006 | +| 2812 | 6.259108 | 0.5262 | +| 3163 | 6.526213 | 0.5574 | +| 3556 | 6.526213 | 0.5667 | +| 3750 | 6.526213 | 0.5509 | +| 5000 | 6.526213 | 0.5556 | + +## GoLevelDB benchmarks + +### `sendMsg` benchmarks + +#### CheckTx + +A single `checkTx` of a `sendMsg` message takes 0.0003071 **seconds** (≈0.31 ms) to execute and uses 74374 gas. + +The transactions in an `8mb` block containing 31645 `sendMsg` messages take 6.45 s (6455816060 ns) to run `checkTx` on all of them. The total gas used is 1884371034 gas. + +#### DeliverTx + +A single `deliverTx` of a `sendMsg` message takes 0.0003948 **seconds** (≈0.39 ms) to execute and uses 103251 gas. + +The transactions in an `8mb` block containing 31645 `sendMsg` messages take 7.50 s (7506830940 ns) to run `deliverTx` on all of them. The total gas used is 2801272121 gas. + +#### PrepareProposal + +A single `prepareProposal` of a `sendMsg` message takes 0.0003943 **seconds** (≈0.39 ms) to execute and uses 101110 gas. + +An `8mb` block containing 31645 `sendMsg` messages takes 5.2 s (5242159792 ns) to execute. The total gas used is 1843040790 gas. + +#### ProcessProposal + +A single `processProposal` of a `sendMsg` message takes 0.0003010 **seconds** (≈0.30 ms) to execute and uses 101110 gas. + +An `8mb` block containing 31645 `sendMsg` messages takes 5.21 s (5214205041 ns) to execute. The total gas used is 1843040790 gas.
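To relate the block-level totals above to the single-message figures, divide each total by the number of transactions in the block. The snippet below is an illustrative back-of-the-envelope check only, not part of the repository's benchmark suite; the constants are copied from the `sendMsg` results above.

```go
package main

import "fmt"

func main() {
	// Figures taken from the GoLevelDB `sendMsg` benchmarks above.
	const txsPerBlock = 31645
	blockTotalsNs := map[string]float64{
		"checkTx":         6455816060,
		"deliverTx":       7506830940,
		"prepareProposal": 5242159792,
		"processProposal": 5214205041,
	}

	for phase, totalNs := range blockTotalsNs {
		// Average cost per message, converted from nanoseconds to seconds.
		perTxSeconds := totalNs / float64(txsPerBlock) / 1e9
		fmt.Printf("%s: %.6f s per message\n", phase, perTxSeconds)
	}
}
```

Each phase averages roughly 0.2 ms per `sendMsg` message, the same order of magnitude as the single-message figures quoted above.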
+ +### `PFB` benchmarks + +#### CheckTx: `BenchmarkCheckTx_PFB_Multi` + +Benchmarks of `CheckTx` for a single PFB with different sizes: + +| Benchmark Name | Time (ns/op) | Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|---------------------------------------------|--------------|----------|--------------------------|-----------------------| +| BenchmarkCheckTx_PFB_Multi/300_bytes-16 | 0.0005847 ns | 74,664 | 703 | 0.000703 MB | +| BenchmarkCheckTx_PFB_Multi/500_bytes-16 | 0.0005136 ns | 74,664 | 903 | 0.000903 MB | +| BenchmarkCheckTx_PFB_Multi/1000_bytes-16 | 0.0005754 ns | 74,664 | 1,403 | 0.001403 MB | +| BenchmarkCheckTx_PFB_Multi/5000_bytes-16 | 0.0005706 ns | 74,664 | 5,403 | 0.005403 MB | +| BenchmarkCheckTx_PFB_Multi/10000_bytes-16 | 0.0006885 ns | 74,664 | 10,403 | 0.010403 MB | +| BenchmarkCheckTx_PFB_Multi/50000_bytes-16 | 0.0006683 ns | 74,674 | 50,406 | 0.050406 MB | +| BenchmarkCheckTx_PFB_Multi/100000_bytes-16 | 0.0008378 ns | 74,674 | 100,406 | 0.100406 MB | +| BenchmarkCheckTx_PFB_Multi/200000_bytes-16 | 0.001130 ns | 74,674 | 200,406 | 0.200406 MB | +| BenchmarkCheckTx_PFB_Multi/300000_bytes-16 | 0.001164 ns | 74,674 | 300,406 | 0.300406 MB | +| BenchmarkCheckTx_PFB_Multi/400000_bytes-16 | 0.001550 ns | 74,674 | 400,406 | 0.400406 MB | +| BenchmarkCheckTx_PFB_Multi/500000_bytes-16 | 0.001829 ns | 74,674 | 500,406 | 0.500406 MB | +| BenchmarkCheckTx_PFB_Multi/1000000_bytes-16 | 0.002452 ns | 74,674 | 1,000,406 | 1.000406 MB | +| BenchmarkCheckTx_PFB_Multi/2000000_bytes-16 | 0.004647 ns | 74,674 | 2,000,406 | 2.000406 MB | +| BenchmarkCheckTx_PFB_Multi/3000000_bytes-16 | 0.006415 ns | 74,684 | 3,000,409 | 3.000409 MB | +| BenchmarkCheckTx_PFB_Multi/4000000_bytes-16 | 0.007709 ns | 74,684 | 4,000,409 | 4.000409 MB | +| BenchmarkCheckTx_PFB_Multi/5000000_bytes-16 | 0.01014 ns | 74,684 | 5,000,409 | 5.000409 MB | +| BenchmarkCheckTx_PFB_Multi/6000000_bytes-16 | 0.01153 ns | 74,684 | 6,000,409 | 6.000409 MB | + +#### DeliverTx: `BenchmarkDeliverTx_PFB_Multi` + +Benchmarks of `DeliverTx` for a single PFB with different sizes: + +| Benchmark Name | Time (ns/op) | Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|-----------------------------------------------|--------------|------------|--------------------------|-----------------------| +| BenchmarkDeliverTx_PFB_Multi/300_bytes-16 | 0.0005010 ns | 77,682 | 703 | 0.000703 MB | +| BenchmarkDeliverTx_PFB_Multi/500_bytes-16 | 0.0004297 ns | 81,778 | 903 | 0.000903 MB | +| BenchmarkDeliverTx_PFB_Multi/1000_bytes-16 | 0.0005227 ns | 85,874 | 1,403 | 0.001403 MB | +| BenchmarkDeliverTx_PFB_Multi/5000_bytes-16 | 0.0005552 ns | 118,642 | 5,403 | 0.005403 MB | +| BenchmarkDeliverTx_PFB_Multi/10000_bytes-16 | 0.0004537 ns | 159,602 | 10,403 | 0.010403 MB | +| BenchmarkDeliverTx_PFB_Multi/50000_bytes-16 | 0.0004896 ns | 499,580 | 50,406 | 0.050406 MB | +| BenchmarkDeliverTx_PFB_Multi/100000_bytes-16 | 0.0005505 ns | 925,564 | 100,406 | 0.100406 MB | +| BenchmarkDeliverTx_PFB_Multi/200000_bytes-16 | 0.0003661 ns | 1,773,436 | 200,406 | 0.200406 MB | +| BenchmarkDeliverTx_PFB_Multi/300000_bytes-16 | 0.0004681 ns | 2,625,404 | 300,406 | 0.300406 MB | +| BenchmarkDeliverTx_PFB_Multi/400000_bytes-16 | 0.0003012 ns | 3,473,276 | 400,406 | 0.400406 MB | +| BenchmarkDeliverTx_PFB_Multi/500000_bytes-16 | 0.0003164 ns | 4,325,244 | 500,406 | 0.500406 MB | +| BenchmarkDeliverTx_PFB_Multi/1000000_bytes-16 | 0.0004873 ns | 8,572,796 | 1,000,406 | 1.000406 MB | +| BenchmarkDeliverTx_PFB_Multi/2000000_bytes-16 | 0.0004004 
ns | 17,071,996 | 2,000,406 | 2.000406 MB | +| BenchmarkDeliverTx_PFB_Multi/3000000_bytes-16 | 0.0003486 ns | 25,571,206 | 3,000,409 | 3.000409 MB | +| BenchmarkDeliverTx_PFB_Multi/4000000_bytes-16 | 0.0004354 ns | 34,066,310 | 4,000,409 | 4.000409 MB | +| BenchmarkDeliverTx_PFB_Multi/5000000_bytes-16 | 0.0003734 ns | 42,565,510 | 5,000,409 | 5.000409 MB | +| BenchmarkDeliverTx_PFB_Multi/6000000_bytes-16 | 0.0003595 ns | 51,064,710 | 6,000,409 | 6.000409 MB | + +#### PrepareProposal: `BenchmarkPrepareProposal_PFB_Multi` + +The benchmarks for `PrepareProposal` for 8mb blocks containing PFBs of different sizes: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Prepare Proposal Time (s) | Total Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|------------------------------------------------------------------------|-----------------|------------------------|---------------------------|-----------------|--------------------------|-----------------------| +| BenchmarkPrepareProposal_PFB_Multi/15000_transactions_of_300_bytes-16 | 6.239 | 10,318 | 2.452 s | 988,490,895,000 | 703 | 0.000703 MB | +| BenchmarkPrepareProposal_PFB_Multi/10000_transactions_of_500_bytes-16 | 5.035 | 6,331 | 1.721 s | 439,343,930,000 | 903 | 0.000903 MB | +| BenchmarkPrepareProposal_PFB_Multi/6000_transactions_of_1000_bytes-16 | 5.809 | 4,566 | 1.063 s | 158,174,358,000 | 1,403 | 0.001403 MB | +| BenchmarkPrepareProposal_PFB_Multi/3000_transactions_of_5000_bytes-16 | 7.188 | 1,413 | 0.527 s | 39,550,179,000 | 5,403 | 0.005403 MB | +| BenchmarkPrepareProposal_PFB_Multi/1000_transactions_of_10000_bytes-16 | 7.470 | 758 | 0.210 s | 4,397,393,000 | 10,403 | 0.010403 MB | +| BenchmarkPrepareProposal_PFB_Multi/500_transactions_of_50000_bytes-16 | 7.441 | 155 | 0.125 s | 1,100,446,500 | 50,406 | 0.050406 MB | +| BenchmarkPrepareProposal_PFB_Multi/100_transactions_of_100000_bytes-16 | 7.368 | 77 | 0.061 s | 44,369,300 | 100,406 | 0.100406 MB | +| BenchmarkPrepareProposal_PFB_Multi/100_transactions_of_200000_bytes-16 | 7.260 | 38 | 0.058 s | 44,369,300 | 200,406 | 0.200406 MB | +| BenchmarkPrepareProposal_PFB_Multi/50_transactions_of_300000_bytes-16 | 7.161 | 25 | 0.042 s | 11,202,150 | 300,406 | 0.300406 MB | +| BenchmarkPrepareProposal_PFB_Multi/50_transactions_of_400000_bytes-16 | 7.254 | 19 | 0.038 s | 11,202,150 | 400,406 | 0.400406 MB | +| BenchmarkPrepareProposal_PFB_Multi/30_transactions_of_500000_bytes-16 | 7.157 | 15 | 0.031 s | 4,085,490 | 500,406 | 0.500406 MB | +| BenchmarkPrepareProposal_PFB_Multi/10_transactions_of_1000000_bytes-16 | 6.678 | 7 | 0.026 s | 483,230 | 1,000,406 | 1.000406 MB | +| BenchmarkPrepareProposal_PFB_Multi/5_transactions_of_2000000_bytes-16 | 5.723 | 3 | 0.027 s | 131,790 | 2,000,406 | 2.000406 MB | +| BenchmarkPrepareProposal_PFB_Multi/3_transactions_of_3000000_bytes-16 | 5.723 | 2 | 0.030 s | 52,716 | 3,000,409 | 3.000409 MB | +| BenchmarkPrepareProposal_PFB_Multi/3_transactions_of_4000000_bytes-16 | 3.815 | 1 | 0.026 s | 52,716 | 4,000,409 | 4.000409 MB | +| BenchmarkPrepareProposal_PFB_Multi/2_transactions_of_5000000_bytes-16 | 4.769 | 1 | 0.031 s | 26,358 | 5,000,409 | 5.000409 MB | +| BenchmarkPrepareProposal_PFB_Multi/2_transactions_of_6000000_bytes-16 | 5.722 | 1 | 0.028 s | 26,358 | 6,000,409 | 6.000409 MB | + +#### ProcessProposal: `BenchmarkProcessProposal_PFB_Multi` + +The benchmarks for `ProcessProposal` for 8mb blocks containing PFBs of different sizes: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Process Proposal Time (s) | Total 
Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|------------------------------------------------------------------------|-----------------|------------------------|---------------------------|-----------------|--------------------------|-----------------------| +| BenchmarkProcessProposal_PFB_Multi/15000_transactions_of_300_bytes-16 | 6.239 | 10,318 | 1.813 s | 988,490,895,000 | 703 | 0.000703 MB | +| BenchmarkProcessProposal_PFB_Multi/10000_transactions_of_500_bytes-16 | 5.035 | 6,331 | 1.120 s | 439,343,930,000 | 903 | 0.000903 MB | +| BenchmarkProcessProposal_PFB_Multi/6000_transactions_of_1000_bytes-16 | 5.809 | 4,566 | 0.829 s | 158,174,358,000 | 1,403 | 0.001403 MB | +| BenchmarkProcessProposal_PFB_Multi/3000_transactions_of_5000_bytes-16 | 7.188 | 1,413 | 0.290 s | 39,550,179,000 | 5,403 | 0.005403 MB | +| BenchmarkProcessProposal_PFB_Multi/1000_transactions_of_10000_bytes-16 | 7.470 | 758 | 0.188 s | 4,397,393,000 | 10,403 | 0.010403 MB | +| BenchmarkProcessProposal_PFB_Multi/500_transactions_of_50000_bytes-16 | 7.441 | 155 | 0.076 s | 1,100,446,500 | 50,406 | 0.050406 MB | +| BenchmarkProcessProposal_PFB_Multi/100_transactions_of_100000_bytes-16 | 7.368 | 77 | 0.056 s | 44,369,300 | 100,406 | 0.100406 MB | +| BenchmarkProcessProposal_PFB_Multi/100_transactions_of_200000_bytes-16 | 7.260 | 38 | 0.050 s | 44,369,300 | 200,406 | 0.200406 MB | +| BenchmarkProcessProposal_PFB_Multi/50_transactions_of_300000_bytes-16 | 7.161 | 25 | 0.048 s | 11,202,150 | 300,406 | 0.300406 MB | +| BenchmarkProcessProposal_PFB_Multi/50_transactions_of_400000_bytes-16 | 7.254 | 19 | 0.048 s | 11,202,150 | 400,406 | 0.400406 MB | +| BenchmarkProcessProposal_PFB_Multi/30_transactions_of_500000_bytes-16 | 7.157 | 15 | 0.043 s | 4,085,490 | 500,406 | 0.500406 MB | +| BenchmarkProcessProposal_PFB_Multi/10_transactions_of_1000000_bytes-16 | 6.678 | 7 | 0.041 s | 483,230 | 1,000,406 | 1.000406 MB | +| BenchmarkProcessProposal_PFB_Multi/5_transactions_of_2000000_bytes-16 | 5.723 | 3 | 0.053 s | 131,790 | 2,000,406 | 2.000406 MB | +| BenchmarkProcessProposal_PFB_Multi/3_transactions_of_3000000_bytes-16 | 5.723 | 2 | 0.037 s | 52,716 | 3,000,409 | 3.000409 MB | +| BenchmarkProcessProposal_PFB_Multi/3_transactions_of_4000000_bytes-16 | 3.815 | 1 | 0.071 s | 52,716 | 4,000,409 | 4.000409 MB | +| BenchmarkProcessProposal_PFB_Multi/2_transactions_of_5000000_bytes-16 | 4.769 | 1 | 0.034 s | 26,358 | 5,000,409 | 5.000409 MB | +| BenchmarkProcessProposal_PFB_Multi/2_transactions_of_6000000_bytes-16 | 5.722 | 1 | 0.062 s | 26,358 | 6,000,409 | 6.000409 MB | + +### IBC `UpdateClient` benchmarks + +#### CheckTx: `BenchmarkIBC_CheckTx_Update_Client_Multi` + +The benchmarks of executing `checkTx` on a single transaction containing an IBC `updateClient` with different numbers of required signatures: + +| Benchmark Name | Time (ns/op) | Total Gas Used | Number of Validators | Number of Verified Signatures | Transaction Size (Bytes) | Transaction Size (MB) | +|-----------------------------------------------------------------------|--------------|----------------|----------------------|-------------------------------|--------------------------|-----------------------| +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_2-16 | 1,370 | 108,670 | 2 | 1 | 1,399 | 0.001399 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_10-16 | 3,577 | 127,710 | 10 | 6 | 3,303 | 0.003303 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_25-16 | 7,432 | 163,430 | 25 | 16 | 6,875 | 
0.006875 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_50-16 | 9,879 | 222,930 | 50 | 33 | 12,825 | 0.012825 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_75-16 | 12,060 | 282,480 | 75 | 50 | 18,780 | 0.018780 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_100-16 | 13,080 | 341,980 | 100 | 66 | 24,730 | 0.024730 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_125-16 | 14,390 | 401,480 | 125 | 83 | 30,680 | 0.030680 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_150-16 | 16,440 | 459,428 | 150 | 100 | 36,479 | 0.036479 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_175-16 | 17,370 | 520,500 | 175 | 116 | 42,582 | 0.042582 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_200-16 | 18,840 | 580,000 | 200 | 133 | 48,532 | 0.048532 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_225-16 | 21,760 | 637,198 | 225 | 150 | 54,256 | 0.054256 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_250-16 | 19,680 | 699,020 | 250 | 166 | 60,434 | 0.060434 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_300-16 | 22,580 | 818,020 | 300 | 200 | 72,334 | 0.072334 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_400-16 | 25,990 | 1,056,020 | 400 | 266 | 96,134 | 0.096134 MB | +| BenchmarkIBC_CheckTx_Update_Client_Multi/number_of_validators:_500-16 | 27,100 | 1,288,968 | 500 | 333 | 119,433 | 0.119433 MB | + +#### DeliverTx: `BenchmarkIBC_DeliverTx_Update_Client_Multi` + +The benchmarks of executing `deliverTx` on a single transaction containing an IBC `updateClient` with different numbers of required signatures: + +| Benchmark Name | Time (ns/op) | Gas Used | Number of Validators | Number of Verified Signatures | Transaction Size (Bytes) | Transaction Size (MB) | +|-------------------------------------------------------------------------|--------------|-----------|----------------------|-------------------------------|--------------------------|-----------------------| +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_2-16 | 1,575 | 107,592 | 2 | 1 | 1,399 | 0.001399 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_10-16 | 1,240 | 126,632 | 10 | 6 | 3,303 | 0.003303 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_25-16 | 1,142 | 162,352 | 25 | 16 | 6,875 | 0.006875 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_50-16 | 16,260 | 221,852 | 50 | 33 | 12,825 | 0.012825 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_75-16 | 13,120 | 281,402 | 75 | 50 | 18,780 | 0.018780 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_100-16 | 7,336 | 340,902 | 100 | 66 | 24,730 | 0.024730 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_125-16 | 7,668 | 399,100 | 125 | 83 | 30,554 | 0.030554 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_150-16 | 5,603 | 459,902 | 150 | 100 | 36,630 | 0.036630 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_175-16 | 11,050 | 519,422 | 175 | 116 | 42,582 | 0.042582 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_200-16 | 9,553 | 578,922 | 200 | 133 | 48,532 | 0.048532 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_225-16 | 13,170 | 638,422 | 225 | 150 | 54,482 | 0.054482 MB | +| 
BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_250-16 | 8,286 | 695,390 | 250 | 166 | 60,183 | 0.060183 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_300-16 | 15,820 | 816,942 | 300 | 200 | 72,334 | 0.072334 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_400-16 | 19,650 | 1,050,890 | 400 | 266 | 95,733 | 0.095733 MB | +| BenchmarkIBC_DeliverTx_Update_Client_Multi/number_of_validators:_500-16 | 22,900 | 1,292,942 | 500 | 333 | 119,934 | 0.119934 MB | + +#### PrepareProposal: `BenchmarkIBC_PrepareProposal_Update_Client_Multi` + +Benchmarks of an `8mb` containing the maximum number of IBC `UpdateClient` with different number of signatures: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Number of Validators | Number of Verified Signatures | Prepare Proposal Time (s) | Total Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|-------------------------------------------------------------------------------|-----------------|------------------------|----------------------|-------------------------------|---------------------------|----------------|--------------------------|-----------------------| +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_2-16 | 7.457 | 5,574 | 2 | 1 | 1.0729 | 389,819,345 | 1,469 | 0.001469 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_10-16 | 7.464 | 2,367 | 10 | 6 | 0.5564 | 210,605,480 | 3,373 | 0.003373 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_25-16 | 7.462 | 1,142 | 25 | 16 | 0.4047 | 142,106,425 | 6,919 | 0.006919 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_50-16 | 7.462 | 610 | 50 | 33 | 0.2432 | 112,364,505 | 12,895 | 0.012895 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_75-16 | 7.452 | 416 | 75 | 50 | 0.1357 | 101,405,415 | 18,850 | 0.018850 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_100-16 | 7.453 | 316 | 100 | 66 | 0.1573 | 95,833,915 | 24,800 | 0.024800 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_125-16 | 7.460 | 256 | 125 | 83 | 0.1653 | 92,549,255 | 30,624 | 0.030624 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_150-16 | 7.445 | 214 | 150 | 100 | 0.1804 | 90,046,805 | 36,549 | 0.036549 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_175-16 | 7.432 | 183 | 175 | 116 | 0.1916 | 88,172,820 | 42,652 | 0.042652 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_200-16 | 7.452 | 161 | 200 | 133 | 0.2167 | 87,153,710 | 48,602 | 0.048602 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_225-16 | 7.430 | 143 | 225 | 150 | 0.2065 | 85,919,620 | 54,552 | 0.054552 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_250-16 | 7.435 | 129 | 250 | 166 | 0.2292 | 85,187,130 | 60,504 | 0.060504 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_300-16 | 7.450 | 108 | 300 | 200 | 0.2440 | 84,173,555 | 72,404 | 0.072404 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_400-16 | 7.426 | 81 | 400 | 266 | 0.2959 | 82,411,590 | 96,204 | 0.096204 | +| BenchmarkIBC_PrepareProposal_Update_Client_Multi/number_of_validators:_500-16 | 7.435 | 65 | 500 | 333 | 0.3309 | 81,605,510 | 120,004 | 0.120004 | + +#### ProcessProposal: `BenchmarkIBC_ProcessProposal_Update_Client_Multi` + +Benchmarks of 
an `8mb` containing the maximum number of IBC `UpdateClient` with different number of signatures: + +| Benchmark Name | Block Size (MB) | Number of Transactions | Number of Validators | Number of Verified Signatures | Process Proposal Time (s) | Total Gas Used | Transaction Size (Bytes) | Transaction Size (MB) | +|-------------------------------------------------------------------------------|-----------------|------------------------|----------------------|-------------------------------|---------------------------|----------------|--------------------------|-----------------------| +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_2-16 | 7.457 | 5,586 | 2 | 1 | 1.0388 | 390,490,985 | 1,466 | 0.001466 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_10-16 | 7.464 | 2,367 | 10 | 6 | 0.4714 | 210,605,480 | 3,373 | 0.003373 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_25-16 | 7.465 | 1,138 | 25 | 16 | 0.2771 | 141,904,565 | 6,945 | 0.006945 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_50-16 | 7.462 | 610 | 50 | 33 | 0.1598 | 112,364,505 | 12,895 | 0.012895 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_75-16 | 7.452 | 416 | 75 | 50 | 0.1227 | 101,405,415 | 18,850 | 0.018850 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_100-16 | 7.453 | 316 | 100 | 66 | 0.1112 | 95,833,915 | 24,800 | 0.024800 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_125-16 | 7.462 | 255 | 125 | 83 | 0.1012 | 92,509,080 | 30,750 | 0.030750 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_150-16 | 7.441 | 213 | 150 | 100 | 0.1035 | 89,947,710 | 36,700 | 0.036700 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_175-16 | 7.432 | 183 | 175 | 116 | 0.0878 | 88,172,820 | 42,652 | 0.042652 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_200-16 | 7.467 | 162 | 200 | 133 | 0.0974 | 87,369,345 | 48,401 | 0.048401 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_225-16 | 7.451 | 144 | 225 | 150 | 0.0789 | 86,194,935 | 54,326 | 0.054326 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_250-16 | 7.428 | 129 | 250 | 166 | 0.0775 | 85,109,730 | 60,444 | 0.060444 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_300-16 | 7.450 | 108 | 300 | 200 | 0.0879 | 84,173,555 | 72,404 | 0.072404 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_400-16 | 7.426 | 81 | 400 | 266 | 0.0616 | 82,411,590 | 96,204 | 0.096204 | +| BenchmarkIBC_ProcessProposal_Update_Client_Multi/number_of_validators:_500-16 | 7.435 | 65 | 500 | 333 | 0.0596 | 81,605,510 | 120,004 | 0.120004 | + diff --git a/app/check_tx.go b/app/check_tx.go index 0867cd2436..f76b37880f 100644 --- a/app/check_tx.go +++ b/app/check_tx.go @@ -41,9 +41,8 @@ func (app *App) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { switch req.Type { // new transactions must be checked in their entirety case abci.CheckTxType_New: - // FIXME: we have a hardcoded subtree root threshold here. 
This is because we can't access - // the app version because the context is not initialized - err := blobtypes.ValidateBlobTx(app.txConfig, btx, appconsts.DefaultSubtreeRootThreshold) + appVersion := app.AppVersion() + err := blobtypes.ValidateBlobTx(app.txConfig, btx, appconsts.SubtreeRootThreshold(appVersion), appVersion) if err != nil { return sdkerrors.ResponseCheckTxWithEvents(err, 0, 0, []abci.Event{}, false) } diff --git a/app/default_overrides.go b/app/default_overrides.go index 0ceedaeee1..9e0b5dd382 100644 --- a/app/default_overrides.go +++ b/app/default_overrides.go @@ -8,7 +8,6 @@ import ( "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/x/mint" minttypes "github.com/celestiaorg/celestia-app/v3/x/mint/types" - "github.com/celestiaorg/go-square/v2/share" "github.com/cosmos/cosmos-sdk/codec" serverconfig "github.com/cosmos/cosmos-sdk/server/config" sdk "github.com/cosmos/cosmos-sdk/types" @@ -259,23 +258,17 @@ func DefaultEvidenceParams() tmproto.EvidenceParams { func DefaultConsensusConfig() *tmcfg.Config { cfg := tmcfg.DefaultConfig() // Set broadcast timeout to be 50 seconds in order to avoid timeouts for long block times - // TODO: make TimeoutBroadcastTx configurable per https://github.com/celestiaorg/celestia-app/issues/1034 cfg.RPC.TimeoutBroadcastTxCommit = 50 * time.Second cfg.RPC.MaxBodyBytes = int64(8388608) // 8 MiB - cfg.Mempool.TTLNumBlocks = 5 - cfg.Mempool.TTLDuration = time.Duration(cfg.Mempool.TTLNumBlocks) * appconsts.GoalBlockTime - // Given that there is a stateful transaction size check in CheckTx, - // We set a loose upper bound on what we expect the transaction to - // be based on the upper bound size of the entire block for the given - // version. This acts as a first line of DoS protection - upperBoundBytes := appconsts.DefaultSquareSizeUpperBound * appconsts.DefaultSquareSizeUpperBound * share.ContinuationSparseShareContentSize - cfg.Mempool.MaxTxBytes = upperBoundBytes - cfg.Mempool.MaxTxsBytes = int64(upperBoundBytes) * cfg.Mempool.TTLNumBlocks + cfg.Mempool.TTLNumBlocks = 12 + cfg.Mempool.TTLDuration = 75 * time.Second + cfg.Mempool.MaxTxBytes = 7_897_088 + cfg.Mempool.MaxTxsBytes = 39_485_440 cfg.Mempool.Version = "v1" // prioritized mempool - cfg.Consensus.TimeoutPropose = appconsts.TimeoutPropose - cfg.Consensus.TimeoutCommit = appconsts.TimeoutCommit + cfg.Consensus.TimeoutPropose = appconsts.GetTimeoutPropose(appconsts.LatestVersion) + cfg.Consensus.TimeoutCommit = appconsts.GetTimeoutCommit(appconsts.LatestVersion) cfg.Consensus.SkipTimeoutCommit = false cfg.TxIndex.Indexer = "null" diff --git a/app/default_overrides_test.go b/app/default_overrides_test.go index 674cd13d9f..c78195717a 100644 --- a/app/default_overrides_test.go +++ b/app/default_overrides_test.go @@ -80,11 +80,11 @@ func TestDefaultConsensusConfig(t *testing.T) { Size: tmcfg.DefaultMempoolConfig().Size, WalPath: tmcfg.DefaultMempoolConfig().WalPath, - // overrides + // Overrides MaxTxBytes: 7_897_088, MaxTxsBytes: 39_485_440, TTLDuration: 75 * time.Second, - TTLNumBlocks: 5, + TTLNumBlocks: 12, Version: "v1", } assert.Equal(t, want, *got.Mempool) diff --git a/app/module/versioned_ibc_module_test.go b/app/module/versioned_ibc_module_test.go index 8d2fb23d0e..57f94ec9f9 100644 --- a/app/module/versioned_ibc_module_test.go +++ b/app/module/versioned_ibc_module_test.go @@ -23,7 +23,7 @@ func TestVersionedIBCModule(t *testing.T) { mockWrappedModule := mocks.NewMockIBCModule(ctrl) mockNextModule := mocks.NewMockIBCModule(ctrl) - 
versionedModule := module.NewVersionedIBCModule(mockWrappedModule, mockNextModule, 2, 2) + versionedModule := module.NewVersionedIBCModule(mockWrappedModule, mockNextModule, 2, 3) testCases := []struct { name string diff --git a/app/modules.go b/app/modules.go index a89f418205..d629a99073 100644 --- a/app/modules.go +++ b/app/modules.go @@ -96,75 +96,75 @@ func (app *App) setupModuleManager(skipGenesisInvariants bool) error { app.manager, err = module.NewManager([]module.VersionedModule{ { Module: genutil.NewAppModule(app.AccountKeeper, app.StakingKeeper, app.BaseApp.DeliverTx, app.txConfig), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: auth.NewAppModule(app.appCodec, app.AccountKeeper, nil), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: bank.NewAppModule(app.appCodec, app.BankKeeper, app.AccountKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: capability.NewAppModule(app.appCodec, *app.CapabilityKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: feegrantmodule.NewAppModule(app.appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: crisis.NewAppModule(&app.CrisisKeeper, skipGenesisInvariants), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: gov.NewAppModule(app.appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: mint.NewAppModule(app.appCodec, app.MintKeeper, app.AccountKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: slashing.NewAppModule(app.appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: distr.NewAppModule(app.appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: staking.NewAppModule(app.appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: evidence.NewAppModule(app.EvidenceKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: authzmodule.NewAppModule(app.appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: ibc.NewAppModule(app.IBCKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: params.NewAppModule(app.ParamsKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: transfer.NewAppModule(app.TransferKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: blob.NewAppModule(app.appCodec, app.BlobKeeper), - FromVersion: v1, ToVersion: v2, + FromVersion: v1, ToVersion: v3, }, { Module: blobstream.NewAppModule(app.appCodec, app.BlobstreamKeeper), @@ -172,19 +172,19 @@ func (app *App) setupModuleManager(skipGenesisInvariants bool) error { }, { Module: signal.NewAppModule(app.SignalKeeper), - FromVersion: v2, ToVersion: v2, + FromVersion: v2, ToVersion: v3, }, { Module: 
minfee.NewAppModule(app.ParamsKeeper), - FromVersion: v2, ToVersion: v2, + FromVersion: v2, ToVersion: v3, }, { Module: packetforward.NewAppModule(app.PacketForwardKeeper), - FromVersion: v2, ToVersion: v2, + FromVersion: v2, ToVersion: v3, }, { Module: ica.NewAppModule(nil, &app.ICAHostKeeper), - FromVersion: v2, ToVersion: v2, + FromVersion: v2, ToVersion: v3, }, }) if err != nil { @@ -303,7 +303,7 @@ func allStoreKeys() []string { // versionedStoreKeys returns the store keys for each app version. func versionedStoreKeys() map[uint64][]string { return map[uint64][]string{ - 1: { + v1: { authtypes.StoreKey, authzkeeper.StoreKey, banktypes.StoreKey, @@ -321,7 +321,7 @@ func versionedStoreKeys() map[uint64][]string { stakingtypes.StoreKey, upgradetypes.StoreKey, }, - 2: { + v2: { authtypes.StoreKey, authzkeeper.StoreKey, banktypes.StoreKey, @@ -341,6 +341,26 @@ func versionedStoreKeys() map[uint64][]string { stakingtypes.StoreKey, upgradetypes.StoreKey, }, + v3: { // same as v2 + authtypes.StoreKey, + authzkeeper.StoreKey, + banktypes.StoreKey, + blobtypes.StoreKey, + capabilitytypes.StoreKey, + distrtypes.StoreKey, + evidencetypes.StoreKey, + feegrant.StoreKey, + govtypes.StoreKey, + ibchost.StoreKey, + ibctransfertypes.StoreKey, + icahosttypes.StoreKey, + minttypes.StoreKey, + packetforwardtypes.StoreKey, + signaltypes.StoreKey, + slashingtypes.StoreKey, + stakingtypes.StoreKey, + upgradetypes.StoreKey, + }, } } diff --git a/app/prepare_proposal.go b/app/prepare_proposal.go index 78049a1ad4..a34c64bd7d 100644 --- a/app/prepare_proposal.go +++ b/app/prepare_proposal.go @@ -1,13 +1,16 @@ package app import ( + "fmt" "time" "github.com/celestiaorg/celestia-app/v3/app/ante" "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/pkg/da" - square "github.com/celestiaorg/go-square/v2" - "github.com/celestiaorg/go-square/v2/share" + shares "github.com/celestiaorg/go-square/shares" + square "github.com/celestiaorg/go-square/square" + squarev2 "github.com/celestiaorg/go-square/v2" + sharev2 "github.com/celestiaorg/go-square/v2/share" "github.com/cosmos/cosmos-sdk/telemetry" abci "github.com/tendermint/tendermint/abci/types" core "github.com/tendermint/tendermint/proto/tendermint/types" @@ -27,7 +30,7 @@ func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePr Height: req.Height, Time: req.Time, Version: version.Consensus{ - App: app.BaseApp.AppVersion(), + App: app.AppVersion(), }, }) handler := ante.NewAnteHandler( @@ -47,10 +50,31 @@ func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePr // Build the square from the set of valid and prioritised transactions. // The txs returned are the ones used in the square and block. 
- dataSquare, txs, err := square.Build(txs, - app.MaxEffectiveSquareSize(sdkCtx), - appconsts.SubtreeRootThreshold(app.GetBaseApp().AppVersion()), + var ( + dataSquareBytes [][]byte + err error + size uint64 ) + switch app.AppVersion() { + case v3: + var dataSquare squarev2.Square + dataSquare, txs, err = squarev2.Build(txs, + app.MaxEffectiveSquareSize(sdkCtx), + appconsts.SubtreeRootThreshold(app.GetBaseApp().AppVersion()), + ) + dataSquareBytes = sharev2.ToBytes(dataSquare) + size = uint64(dataSquare.Size()) + case v2, v1: + var dataSquare square.Square + dataSquare, txs, err = square.Build(txs, + app.MaxEffectiveSquareSize(sdkCtx), + appconsts.SubtreeRootThreshold(app.GetBaseApp().AppVersion()), + ) + dataSquareBytes = shares.ToBytes(dataSquare) + size = uint64(dataSquare.Size()) + default: + err = fmt.Errorf("unsupported app version: %d", app.AppVersion()) + } if err != nil { panic(err) } @@ -58,7 +82,7 @@ func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePr // Erasure encode the data square to create the extended data square (eds). // Note: uses the nmt wrapper to construct the tree. See // pkg/wrapper/nmt_wrapper.go for more information. - eds, err := da.ExtendShares(share.ToBytes(dataSquare)) + eds, err := da.ExtendShares(dataSquareBytes) if err != nil { app.Logger().Error( "failure to erasure the data square while creating a proposal block", @@ -84,7 +108,7 @@ func (app *App) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePr return abci.ResponsePrepareProposal{ BlockData: &core.Data{ Txs: txs, - SquareSize: uint64(dataSquare.Size()), + SquareSize: size, Hash: dah.Hash(), // also known as the data root }, } diff --git a/app/process_proposal.go b/app/process_proposal.go index d1b367179c..68e574f67d 100644 --- a/app/process_proposal.go +++ b/app/process_proposal.go @@ -9,8 +9,10 @@ import ( "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/pkg/da" blobtypes "github.com/celestiaorg/celestia-app/v3/x/blob/types" - "github.com/celestiaorg/go-square/v2" - "github.com/celestiaorg/go-square/v2/share" + shares "github.com/celestiaorg/go-square/shares" + square "github.com/celestiaorg/go-square/square" + squarev2 "github.com/celestiaorg/go-square/v2" + sharev2 "github.com/celestiaorg/go-square/v2/share" blobtx "github.com/celestiaorg/go-square/v2/tx" "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" @@ -65,7 +67,15 @@ func (app *App) ProcessProposal(req abci.RequestProcessProposal) (resp abci.Resp tx = blobTx.Tx } + // todo: uncomment once we're sure this isn't consensus breaking + // sdkCtx = sdkCtx.WithTxBytes(tx) + sdkTx, err := app.txConfig.TxDecoder()(tx) + // Set the tx bytes in the context for app version v3 and greater + if sdkCtx.BlockHeader().Version.App >= 3 { + sdkCtx = sdkCtx.WithTxBytes(tx) + } + if err != nil { if req.Header.Version.App == v1 { // For appVersion 1, there was no block validity rule that all @@ -108,7 +118,7 @@ func (app *App) ProcessProposal(req abci.RequestProcessProposal) (resp abci.Resp // - that the sizes match // - that the namespaces match between blob and PFB // - that the share commitment is correct - if err := blobtypes.ValidateBlobTx(app.txConfig, blobTx, subtreeRootThreshold); err != nil { + if err := blobtypes.ValidateBlobTx(app.txConfig, blobTx, subtreeRootThreshold, app.AppVersion()); err != nil { logInvalidPropBlockError(app.Logger(), req.Header, fmt.Sprintf("invalid blob tx %d", idx), err) return reject() } @@ -122,24 +132,40 
@@ func (app *App) ProcessProposal(req abci.RequestProcessProposal) (resp abci.Resp } - // Construct the data square from the block's transactions - dataSquare, err := square.Construct( - req.BlockData.Txs, - app.MaxEffectiveSquareSize(sdkCtx), - subtreeRootThreshold, + var ( + dataSquareBytes [][]byte + err error ) - if err != nil { - logInvalidPropBlockError(app.Logger(), req.Header, "failure to compute data square from transactions:", err) + + switch app.AppVersion() { + case v3: + var dataSquare squarev2.Square + dataSquare, err = squarev2.Construct(req.BlockData.Txs, app.MaxEffectiveSquareSize(sdkCtx), subtreeRootThreshold) + dataSquareBytes = sharev2.ToBytes(dataSquare) + // Assert that the square size stated by the proposer is correct + if uint64(dataSquare.Size()) != req.BlockData.SquareSize { + logInvalidPropBlock(app.Logger(), req.Header, "proposed square size differs from calculated square size") + return reject() + } + case v2, v1: + var dataSquare square.Square + dataSquare, err = square.Construct(req.BlockData.Txs, app.MaxEffectiveSquareSize(sdkCtx), subtreeRootThreshold) + dataSquareBytes = shares.ToBytes(dataSquare) + // Assert that the square size stated by the proposer is correct + if uint64(dataSquare.Size()) != req.BlockData.SquareSize { + logInvalidPropBlock(app.Logger(), req.Header, "proposed square size differs from calculated square size") + return reject() + } + default: + logInvalidPropBlock(app.Logger(), req.Header, "unsupported app version") return reject() } - - // Assert that the square size stated by the proposer is correct - if uint64(dataSquare.Size()) != req.BlockData.SquareSize { - logInvalidPropBlock(app.Logger(), req.Header, "proposed square size differs from calculated square size") + if err != nil { + logInvalidPropBlockError(app.Logger(), req.Header, "failure to compute data square from transactions:", err) return reject() } - eds, err := da.ExtendShares(share.ToBytes(dataSquare)) + eds, err := da.ExtendShares(dataSquareBytes) if err != nil { logInvalidPropBlockError(app.Logger(), req.Header, "failure to erasure the data square", err) return reject() diff --git a/app/test/consistent_apphash_test.go b/app/test/consistent_apphash_test.go index 972f42da58..9a108e4dd1 100644 --- a/app/test/consistent_apphash_test.go +++ b/app/test/consistent_apphash_test.go @@ -8,19 +8,25 @@ import ( "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" "github.com/celestiaorg/celestia-app/v3/pkg/user" testutil "github.com/celestiaorg/celestia-app/v3/test/util" "github.com/celestiaorg/celestia-app/v3/test/util/blobfactory" "github.com/celestiaorg/celestia-app/v3/test/util/testfactory" blobtypes "github.com/celestiaorg/celestia-app/v3/x/blob/types" + blobstreamtypes "github.com/celestiaorg/celestia-app/v3/x/blobstream/types" + signal "github.com/celestiaorg/celestia-app/v3/x/signal/types" "github.com/celestiaorg/go-square/v2/share" "github.com/celestiaorg/go-square/v2/tx" + "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/crypto/hd" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" + vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" 
"github.com/cosmos/cosmos-sdk/x/authz" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" crisisTypes "github.com/cosmos/cosmos-sdk/x/crisis/types" @@ -29,72 +35,125 @@ import ( govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + gethcommon "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proto/tendermint/version" ) -type BlobTx struct { +type blobTx struct { author string blobs []*share.Blob txOptions []user.TxOption } -// TestConsistentAppHash executes all state machine messages, generates an app hash, +type ( + encodedSdkMessages func(*testing.T, []sdk.AccAddress, []stakingtypes.Validator, *app.App, *user.Signer, *user.Signer) ([][]byte, [][]byte, [][]byte) + encodedBlobTxs func(*testing.T, *user.Signer, []sdk.AccAddress) []byte +) + +type appHashTest struct { + name string + version uint64 + encodedSdkMessages encodedSdkMessages + encodedBlobTxs encodedBlobTxs + expectedDataRoot []byte + expectedAppHash []byte +} + +// TestConsistentAppHash executes all state machine messages on all app versions, generates an app hash, // and compares it against a previously generated hash from the same set of transactions. // App hashes across different commits should be consistent. func TestConsistentAppHash(t *testing.T) { - // Expected app hash produced by v1.x - https://github.com/celestiaorg/celestia-app/blob/v1.x/app/consistent_apphash_test.go - expectedAppHash := []byte{84, 216, 210, 48, 113, 204, 234, 21, 150, 236, 97, 87, 242, 184, 45, 248, 116, 127, 49, 88, 134, 197, 202, 125, 44, 210, 67, 144, 107, 51, 145, 65} - expectedDataRoot := []byte{100, 59, 112, 241, 238, 49, 50, 64, 105, 90, 209, 211, 49, 254, 211, 83, 133, 88, 5, 89, 221, 116, 141, 72, 33, 110, 16, 78, 5, 48, 118, 72} - - // Initialize testApp - testApp := testutil.NewTestApp() - enc := encoding.MakeConfig(app.ModuleEncodingRegisters...) - - // Create deterministic keys - kr, pubKeys := deterministicKeyRing(enc.Codec) - - // Apply genesis state to the app. 
- valKeyRing, _, err := testutil.SetupDeterministicGenesisState(testApp, pubKeys, 20_000_000_000, app.DefaultInitialConsensusParams()) - require.NoError(t, err) + tc := []appHashTest{ + { + name: "execute sdk messages and blob tx on v1", + version: v1.Version, + encodedSdkMessages: encodedSdkMessagesV1, + encodedBlobTxs: createEncodedBlobTx, + expectedDataRoot: []byte{30, 142, 46, 120, 191, 30, 242, 150, 164, 242, 166, 245, 89, 183, 181, 41, 88, 197, 11, 19, 243, 46, 69, 97, 3, 51, 27, 133, 68, 95, 95, 121}, + // Expected app hash produced by v1.x - https://github.com/celestiaorg/celestia-app/blob/v1.x/app/consistent_apphash_test.go + expectedAppHash: []byte{57, 128, 107, 57, 6, 131, 221, 188, 181, 181, 135, 58, 37, 240, 135, 66, 199, 107, 80, 154, 240, 176, 57, 36, 238, 69, 25, 188, 86, 203, 145, 145}, + }, + { + name: "execute sdk messages and blob tx on v2", + version: v2.Version, + encodedSdkMessages: func(t *testing.T, accountAddresses []sdk.AccAddress, genValidators []stakingtypes.Validator, testApp *app.App, signer *user.Signer, valSigner *user.Signer) ([][]byte, [][]byte, [][]byte) { + firstBlockEncodedTxs, secondBlockEncodedTxs, thirdBlockEncodedTxs := encodedSdkMessagesV1(t, accountAddresses, genValidators, testApp, signer, valSigner) + encodedMessagesV2 := encodedSdkMessagesV2(t, genValidators, valSigner) + thirdBlockEncodedTxs = append(thirdBlockEncodedTxs, encodedMessagesV2...) + + return firstBlockEncodedTxs, secondBlockEncodedTxs, thirdBlockEncodedTxs + }, + encodedBlobTxs: createEncodedBlobTx, + expectedDataRoot: []byte{200, 61, 245, 166, 119, 211, 170, 2, 73, 239, 253, 97, 243, 112, 116, 196, 70, 41, 201, 172, 123, 28, 15, 182, 52, 222, 122, 243, 95, 97, 66, 233}, + // Expected app hash produced on v2.x - https://github.com/celestiaorg/celestia-app/blob/v2.x/app/test/consistent_apphash_test.go + expectedAppHash: []byte{14, 115, 34, 28, 33, 70, 118, 3, 111, 250, 161, 185, 187, 151, 54, 78, 86, 37, 44, 252, 8, 26, 164, 251, 36, 20, 151, 170, 181, 84, 32, 136}, + }, + } - // ------------ Genesis User Accounts ------------ + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + testApp := testutil.NewTestApp() + enc := encoding.MakeConfig(app.ModuleEncodingRegisters...) + // Create deterministic keys + kr, pubKeys := deterministicKeyRing(enc.Codec) + consensusParams := app.DefaultConsensusParams() + consensusParams.Version.AppVersion = tt.version + // Apply genesis state to the app. 
+ valKeyRing, _, err := testutil.SetupDeterministicGenesisState(testApp, pubKeys, 20_000_000_000, consensusParams) + require.NoError(t, err) + + // Get account names and addresses from the keyring and create signer + signer, accountAddresses := getAccountsAndCreateSigner(t, kr, enc.TxConfig, testutil.ChainID, tt.version, testApp) + // Validators from genesis state + genValidators := testApp.StakingKeeper.GetAllValidators(testApp.NewContext(false, tmproto.Header{})) + valSigner, _ := getAccountsAndCreateSigner(t, valKeyRing, enc.TxConfig, testutil.ChainID, tt.version, testApp) + + // Convert validators to ABCI validators + abciValidators, err := convertToABCIValidators(genValidators) + require.NoError(t, err) + + firstBlockTxs, secondBlockTxs, thirdBlockTxs := tt.encodedSdkMessages(t, accountAddresses, genValidators, testApp, signer, valSigner) + encodedBlobTx := tt.encodedBlobTxs(t, signer, accountAddresses) + + // Execute the first block + _, firstBlockAppHash, err := executeTxs(testApp, []byte{}, firstBlockTxs, abciValidators, testApp.LastCommitID().Hash) + require.NoError(t, err) + // Execute the second block + _, secondBlockAppHash, err := executeTxs(testApp, encodedBlobTx, secondBlockTxs, abciValidators, firstBlockAppHash) + require.NoError(t, err) + // Execute the final block and get the data root alongside the final app hash + finalDataRoot, finalAppHash, err := executeTxs(testApp, []byte{}, thirdBlockTxs, abciValidators, secondBlockAppHash) + require.NoError(t, err) + + // Require that the app hash is equal to the app hash produced on a different commit + require.Equal(t, tt.expectedAppHash, finalAppHash) + // Require that the data root is equal to the data root produced on a different commit + require.Equal(t, tt.expectedDataRoot, finalDataRoot) + }) + } +} +// getAccountsAndCreateSigner returns a signer with accounts +func getAccountsAndCreateSigner(t *testing.T, kr keyring.Keyring, enc client.TxConfig, chainID string, appVersion uint64, testApp *app.App) (*user.Signer, []sdk.AccAddress) { // Get account names and addresses from the keyring accountNames := testfactory.GetAccountNames(kr) accountAddresses := testfactory.GetAddresses(kr) - // Query keyring account infos accountInfos := queryAccountInfo(testApp, accountNames, kr) - // Create accounts for the signer accounts := createAccounts(accountInfos, accountNames) - // Create a signer with accounts - signer, err := user.NewSigner(kr, enc.TxConfig, testutil.ChainID, app.DefaultInitialVersion, accounts...) - require.NoError(t, err) - - // ------------ Genesis Validator Accounts ------------ - - // Validators from genesis state - genValidators := testApp.StakingKeeper.GetAllValidators(testApp.NewContext(false, tmproto.Header{})) - - // Get validator account names from the validator keyring - valAccountNames := testfactory.GetAccountNames(valKeyRing) - - // Query validator account infos - valAccountInfos := queryAccountInfo(testApp, valAccountNames, valKeyRing) - - // Create accounts for the validators' signer - valAccounts := createAccounts(valAccountInfos, valAccountNames) - - // Create a signer with validator accounts - valSigner, err := user.NewSigner(valKeyRing, enc.TxConfig, testutil.ChainID, app.DefaultInitialVersion, valAccounts...) + signer, err := user.NewSigner(kr, enc, chainID, appVersion, accounts...) 
require.NoError(t, err) + return signer, accountAddresses +} - // ----------- Create SDK Messages ------------ +// encodedSdkMessagesV1 returns encoded SDK messages for v1 +func encodedSdkMessagesV1(t *testing.T, accountAddresses []sdk.AccAddress, genValidators []stakingtypes.Validator, testApp *app.App, signer *user.Signer, valSigner *user.Signer) ([][]byte, [][]byte, [][]byte) { + // ----------- Create v1 SDK Messages ------------ amount := sdk.NewCoins(sdk.NewCoin(app.BondDenom, sdk.NewIntFromUint64(1_000))) // Minimum deposit required for a gov proposal to become active @@ -242,6 +301,27 @@ func TestConsistentAppHash(t *testing.T) { msgWithdrawDelegatorReward := distribution.NewMsgWithdrawDelegatorReward(accountAddresses[0], genValidators[0].GetOperator()) secondBlockSdkMsgs = append(secondBlockSdkMsgs, msgWithdrawDelegatorReward) + // NewMsgCreatePeriodicVestingAccount - creates a periodic vesting account + newAddress := sdk.AccAddress(ed25519.GenPrivKeyFromSecret([]byte("anotherAddress")).PubKey().Address()) + vestingPeriod := []vestingtypes.Period{ + { + Length: 3600, + Amount: amount, + }, + } + msgCreatePeriodicVestingAccount := vestingtypes.NewMsgCreatePeriodicVestingAccount(accountAddresses[3], newAddress, 2, vestingPeriod) + secondBlockSdkMsgs = append(secondBlockSdkMsgs, msgCreatePeriodicVestingAccount) + + // NewMsgCreatePermanentLockedAccount - creates a permanent locked account + newAddress = sdk.AccAddress(ed25519.GenPrivKeyFromSecret([]byte("anotherAddress2")).PubKey().Address()) + msgCreatePermamentLockedAccount := vestingtypes.NewMsgCreatePermanentLockedAccount(accountAddresses[3], newAddress, amount) + secondBlockSdkMsgs = append(secondBlockSdkMsgs, msgCreatePermamentLockedAccount) + + // NewMsgCreateVestingAccount - creates a vesting account + newAddress = sdk.AccAddress(ed25519.GenPrivKeyFromSecret([]byte("anotherAddress3")).PubKey().Address()) + msgCreateVestingAccount := vestingtypes.NewMsgCreateVestingAccount(accountAddresses[3], newAddress, amount, 1, 2, false) + secondBlockSdkMsgs = append(secondBlockSdkMsgs, msgCreateVestingAccount) + // ------------ Third Block ------------ // Txs within the third block are signed by the validator's signer @@ -255,51 +335,53 @@ func TestConsistentAppHash(t *testing.T) { msgUnjail := slashingtypes.NewMsgUnjail(genValidators[3].GetOperator()) thirdBlockSdkMsgs = append(thirdBlockSdkMsgs, msgUnjail) - // ------------ Construct Txs ------------ + // NewMsgRegisterEVMAddress - registers an EVM address + // This message was removed in v2 + if testApp.AppVersion() == v1.Version { + msgRegisterEVMAddress := blobstreamtypes.NewMsgRegisterEVMAddress(genValidators[1].GetOperator(), gethcommon.HexToAddress("hi")) + thirdBlockSdkMsgs = append(thirdBlockSdkMsgs, msgRegisterEVMAddress) + } - // Create SDK transactions from the list of messages - // and separate them into 3 different blocks - firstBlockEncodedTxs, err := processSdkMessages(signer, firstBlockSdkMsgs) + firstBlockTxs, err := processSdkMessages(signer, firstBlockSdkMsgs) require.NoError(t, err) - - secondBlockEncodedTxs, err := processSdkMessages(signer, secondBlockSdkMsgs) + secondBlockTxs, err := processSdkMessages(signer, secondBlockSdkMsgs) + require.NoError(t, err) + thirdBlockTxs, err := processSdkMessages(valSigner, thirdBlockSdkMsgs) require.NoError(t, err) - thirdBlockEncodedTxs, err := processSdkMessages(valSigner, thirdBlockSdkMsgs) + return firstBlockTxs, secondBlockTxs, thirdBlockTxs +} + +// encodedSdkMessagesV2 returns encoded SDK messages introduced in v2 +func 
encodedSdkMessagesV2(t *testing.T, genValidators []stakingtypes.Validator, valSigner *user.Signer) [][]byte { + var v2Messages []sdk.Msg + msgTryUpgrade := signal.NewMsgTryUpgrade(sdk.AccAddress(genValidators[0].GetOperator())) + v2Messages = append(v2Messages, msgTryUpgrade) + + msgSignalVersion := signal.NewMsgSignalVersion(genValidators[0].GetOperator(), 2) + v2Messages = append(v2Messages, msgSignalVersion) + + encodedTxs, err := processSdkMessages(valSigner, v2Messages) require.NoError(t, err) + return encodedTxs +} + +// createEncodedBlobTx creates, signs and returns an encoded blob transaction +func createEncodedBlobTx(t *testing.T, signer *user.Signer, accountAddresses []sdk.AccAddress) []byte { + senderAcc := signer.AccountByAddress(accountAddresses[1]) blob, err := share.NewBlob(fixedNamespace(), []byte{1}, appconsts.DefaultShareVersion, nil) require.NoError(t, err) // Create a Blob Tx - blobTx := BlobTx{ - author: accountNames[1], + blobTx := blobTx{ + author: senderAcc.Name(), blobs: []*share.Blob{blob}, txOptions: blobfactory.DefaultTxOpts(), } encodedBlobTx, _, err := signer.CreatePayForBlobs(blobTx.author, blobTx.blobs, blobTx.txOptions...) require.NoError(t, err) - - // Convert validators to ABCI validators - abciValidators, err := convertToABCIValidators(genValidators) - require.NoError(t, err) - - // Execute the first block - _, firstBlockAppHash, err := executeTxs(testApp, []byte{}, firstBlockEncodedTxs, abciValidators, testApp.LastCommitID().Hash) - require.NoError(t, err) - - // Execute the second block - _, secondBlockAppHash, err := executeTxs(testApp, encodedBlobTx, secondBlockEncodedTxs, abciValidators, firstBlockAppHash) - require.NoError(t, err) - - // Execute the final block and get the data root alongside the final app hash - finalDataRoot, finalAppHash, err := executeTxs(testApp, []byte{}, thirdBlockEncodedTxs, abciValidators, secondBlockAppHash) - require.NoError(t, err) - - // Require that the app hash is equal to the app hash produced on a different commit - require.Equal(t, expectedAppHash, finalAppHash) - // Require that the data root is equal to the data root produced on a different commit - require.Equal(t, expectedDataRoot, finalDataRoot) + return encodedBlobTx } // fixedNamespace returns a hardcoded namespace @@ -388,7 +470,7 @@ func executeTxs(testApp *app.App, encodedBlobTx []byte, encodedSdkTxs [][]byte, dataHash := resPrepareProposal.BlockData.Hash header := tmproto.Header{ - Version: version.Consensus{App: 1}, + Version: version.Consensus{App: testApp.AppVersion()}, DataHash: resPrepareProposal.BlockData.Hash, ChainID: chainID, Time: genesisTime.Add(time.Duration(height) * time.Minute), diff --git a/app/test/prepare_proposal_test.go b/app/test/prepare_proposal_test.go index 8bfcc281f8..f274d9da6a 100644 --- a/app/test/prepare_proposal_test.go +++ b/app/test/prepare_proposal_test.go @@ -1,9 +1,21 @@ package app_test import ( + "crypto/rand" + "strings" "testing" "time" + blobtypes "github.com/celestiaorg/celestia-app/v3/x/blob/types" + blobtx "github.com/celestiaorg/go-square/v2/tx" + + "github.com/celestiaorg/celestia-app/v3/pkg/user" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/stretchr/testify/assert" + tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/cosmos/cosmos-sdk/crypto/hd" @@ -39,6 +51,7 @@ func TestPrepareProposalPutsPFBsAtEnd(t *testing.T) { 
accnts[:numBlobTxs], infos[:numBlobTxs], testfactory.Repeat([]*share.Blob{protoBlob}, numBlobTxs), + blobfactory.DefaultTxOpts()..., ) normalTxs := testutil.SendTxsWithAccounts( @@ -96,6 +109,7 @@ func TestPrepareProposalFiltering(t *testing.T) { testfactory.RandomBlobNamespaces(tmrand.NewRand(), 3), [][]int{{100}, {1000}, {420}}, ), + blobfactory.DefaultTxOpts()..., ) // create 3 MsgSend transactions that are signed with valid account numbers @@ -137,6 +151,44 @@ func TestPrepareProposalFiltering(t *testing.T) { require.NoError(t, err) noAccountTx := []byte(testutil.SendTxWithManualSequence(t, encConf.TxConfig, kr, nilAccount, accounts[0], 1000, "", 0, 6)) + // create a tx that can't be included in a 64 x 64 when accounting for the + // pfb along with the shares + tooManyShareBtx := blobfactory.ManyMultiBlobTx( + t, + encConf.TxConfig, + kr, + testutil.ChainID, + accounts[3:4], + infos[3:4], + blobfactory.NestedBlobs( + t, + testfactory.RandomBlobNamespaces(tmrand.NewRand(), 4000), + [][]int{repeat(4000, 1)}, + ), + )[0] + + // memo is 2 MiB resulting in the transaction being over limit + largeString := strings.Repeat("a", 2*1024*1024) + + // 3 transactions over MaxTxSize limit + largeTxs := coretypes.Txs(testutil.SendTxsWithAccounts(t, testApp, encConf.TxConfig, kr, 1000, accounts[0], accounts[:3], testutil.ChainID, user.SetMemo(largeString))).ToSliceOfBytes() + + // 3 blobTxs over MaxTxSize limit + largeBlobTxs := blobfactory.ManyMultiBlobTx( + t, + encConf.TxConfig, + kr, + testutil.ChainID, + accounts[:3], + infos[:3], + blobfactory.NestedBlobs( + t, + testfactory.RandomBlobNamespaces(tmrand.NewRand(), 3), + [][]int{{100}, {1000}, {420}}, + ), + user.SetMemo(largeString), + ) + type test struct { name string txs func() [][]byte @@ -181,6 +233,20 @@ func TestPrepareProposalFiltering(t *testing.T) { }, prunedTxs: [][]byte{noAccountTx}, }, + { + name: "blob tx with too many shares", + txs: func() [][]byte { + return [][]byte{tooManyShareBtx} + }, + prunedTxs: [][]byte{tooManyShareBtx}, + }, + { + name: "blobTxs and sendTxs that exceed MaxTxSize limit", + txs: func() [][]byte { + return append(largeTxs, largeBlobTxs...) // All txs are over MaxTxSize limit + }, + prunedTxs: append(largeTxs, largeBlobTxs...), + }, } for _, tt := range tests { @@ -204,6 +270,148 @@ func TestPrepareProposalFiltering(t *testing.T) { } } +func TestPrepareProposalCappingNumberOfMessages(t *testing.T) { + if testing.Short() { + t.Skip("skipping prepare proposal capping number of transactions test in short mode.") + } + // creating a big number of accounts so that every account + // only creates a single transaction. This is for transactions + // to be skipped without worrying about the sequence number being + // sequential. + numberOfAccounts := 8000 + accounts := testnode.GenerateAccounts(numberOfAccounts) + consensusParams := app.DefaultConsensusParams() + testApp, kr := testutil.SetupTestAppWithGenesisValSetAndMaxSquareSize(consensusParams, 128, accounts...) + enc := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ + addrs := make([]sdk.AccAddress, 0, numberOfAccounts) + accs := make([]types.AccountI, 0, numberOfAccounts) + signers := make([]*user.Signer, 0, numberOfAccounts) + for index, account := range accounts { + addr := testfactory.GetAddress(kr, account) + addrs = append(addrs, addr) + acc := testutil.DirectQueryAccount(testApp, addrs[index]) + accs = append(accs, acc) + signer, err := user.NewSigner(kr, enc.TxConfig, testutil.ChainID, appconsts.LatestVersion, user.NewAccount(account, acc.GetAccountNumber(), acc.GetSequence())) + require.NoError(t, err) + signers = append(signers, signer) + } + + numberOfPFBs := appconsts.MaxPFBMessages + 500 + pfbTxs := make([][]byte, 0, numberOfPFBs) + randomBytes := make([]byte, 2000) + _, err := rand.Read(randomBytes) + require.NoError(t, err) + accountIndex := 0 + for i := 0; i < numberOfPFBs; i++ { + blob, err := share.NewBlob(share.RandomNamespace(), randomBytes, 1, accs[accountIndex].GetAddress().Bytes()) + require.NoError(t, err) + tx, _, err := signers[accountIndex].CreatePayForBlobs(accounts[accountIndex], []*share.Blob{blob}, user.SetGasLimit(2549760000), user.SetFee(10000)) + require.NoError(t, err) + pfbTxs = append(pfbTxs, tx) + accountIndex++ + } + + multiPFBsPerTxs := make([][]byte, 0, numberOfPFBs) + numberOfMsgsPerTx := 10 + for i := 0; i < numberOfPFBs; i++ { + msgs := make([]sdk.Msg, 0) + blobs := make([]*share.Blob, 0) + for j := 0; j < numberOfMsgsPerTx; j++ { + blob, err := share.NewBlob(share.RandomNamespace(), randomBytes, 1, accs[accountIndex].GetAddress().Bytes()) + require.NoError(t, err) + msg, err := blobtypes.NewMsgPayForBlobs(addrs[accountIndex].String(), appconsts.LatestVersion, blob) + require.NoError(t, err) + msgs = append(msgs, msg) + blobs = append(blobs, blob) + } + txBytes, err := signers[accountIndex].CreateTx(msgs, user.SetGasLimit(2549760000), user.SetFee(10000)) + require.NoError(t, err) + blobTx, err := blobtx.MarshalBlobTx(txBytes, blobs...) + require.NoError(t, err) + multiPFBsPerTxs = append(multiPFBsPerTxs, blobTx) + accountIndex++ + } + + numberOfMsgSends := appconsts.MaxNonPFBMessages + 500 + msgSendTxs := make([][]byte, 0, numberOfMsgSends) + for i := 0; i < numberOfMsgSends; i++ { + msg := banktypes.NewMsgSend( + addrs[accountIndex], + testnode.RandomAddress().(sdk.AccAddress), + sdk.NewCoins(sdk.NewInt64Coin(appconsts.BondDenom, 10)), + ) + rawTx, err := signers[accountIndex].CreateTx([]sdk.Msg{msg}, user.SetGasLimit(1000000), user.SetFee(10)) + require.NoError(t, err) + msgSendTxs = append(msgSendTxs, rawTx) + accountIndex++ + } + + testCases := []struct { + name string + inputTransactions [][]byte + expectedTransactions [][]byte + }{ + { + name: "capping only PFB transactions", + inputTransactions: pfbTxs[:appconsts.MaxPFBMessages+50], + expectedTransactions: pfbTxs[:appconsts.MaxPFBMessages], + }, + { + name: "capping only PFB transactions with multiple messages", + inputTransactions: multiPFBsPerTxs[:appconsts.MaxPFBMessages], + expectedTransactions: multiPFBsPerTxs[:appconsts.MaxPFBMessages/numberOfMsgsPerTx], + }, + { + name: "capping only msg send transactions", + inputTransactions: msgSendTxs[:appconsts.MaxNonPFBMessages+50], + expectedTransactions: msgSendTxs[:appconsts.MaxNonPFBMessages], + }, + { + name: "capping msg send after pfb transactions", + inputTransactions: func() [][]byte { + input := make([][]byte, 0, len(msgSendTxs)+100) + input = append(input, pfbTxs[:100]...) + input = append(input, msgSendTxs...) 
+ return input + }(), + expectedTransactions: func() [][]byte { + expected := make([][]byte, 0, appconsts.MaxNonPFBMessages+100) + expected = append(expected, msgSendTxs[:appconsts.MaxNonPFBMessages]...) + expected = append(expected, pfbTxs[:100]...) + return expected + }(), + }, + { + name: "capping pfb after msg send transactions", + inputTransactions: func() [][]byte { + input := make([][]byte, 0, len(pfbTxs)+100) + input = append(input, msgSendTxs[:100]...) + input = append(input, pfbTxs...) + return input + }(), + expectedTransactions: func() [][]byte { + expected := make([][]byte, 0, appconsts.MaxPFBMessages+100) + expected = append(expected, msgSendTxs[:100]...) + expected = append(expected, pfbTxs[:appconsts.MaxPFBMessages]...) + return expected + }(), + }, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + resp := testApp.PrepareProposal(abci.RequestPrepareProposal{ + BlockData: &tmproto.Data{ + Txs: testCase.inputTransactions, + }, + ChainId: testApp.GetChainID(), + Height: 10, + }) + assert.Equal(t, testCase.expectedTransactions, resp.BlockData.Txs) + }) + } +} + func queryAccountInfo(capp *app.App, accs []string, kr keyring.Keyring) []blobfactory.AccountInfo { infos := make([]blobfactory.AccountInfo, len(accs)) for i, acc := range accs { @@ -216,3 +424,12 @@ func queryAccountInfo(capp *app.App, accs []string, kr keyring.Keyring) []blobfa } return infos } + +// repeat returns a slice of length n with each element set to val. +func repeat[T any](n int, val T) []T { + result := make([]T, n) + for i := range result { + result[i] = val + } + return result +} diff --git a/app/test/priority_test.go b/app/test/priority_test.go index 72586e5718..da7936dd0d 100644 --- a/app/test/priority_test.go +++ b/app/test/priority_test.go @@ -80,7 +80,6 @@ func (s *PriorityTestSuite) TestPriorityByGasPrice() { wg := &sync.WaitGroup{} for _, accName := range s.accountNames { wg.Add(1) - accName := accName // new variable per iteration go func() { defer wg.Done() // ensure that it is greater than the min gas price diff --git a/app/test/process_proposal_test.go b/app/test/process_proposal_test.go index df390dcdfa..69f18b8bd4 100644 --- a/app/test/process_proposal_test.go +++ b/app/test/process_proposal_test.go @@ -3,6 +3,7 @@ package app_test import ( "bytes" "fmt" + "strings" "testing" "time" @@ -20,6 +21,7 @@ import ( "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" v1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" "github.com/celestiaorg/celestia-app/v3/pkg/da" "github.com/celestiaorg/celestia-app/v3/pkg/user" testutil "github.com/celestiaorg/celestia-app/v3/test/util" @@ -49,6 +51,26 @@ func TestProcessProposal(t *testing.T) { testfactory.RandomBlobNamespaces(tmrand.NewRand(), 4), [][]int{{100}, {1000}, {420}, {300}}, ), + blobfactory.DefaultTxOpts()..., + ) + + largeMemo := strings.Repeat("a", appconsts.MaxTxSize(appconsts.LatestVersion)) + + // create 2 single blobTxs that include a large memo making the transaction + // larger than the configured max tx size + largeBlobTxs := blobfactory.ManyMultiBlobTx( + t, enc, kr, testutil.ChainID, accounts[3:], infos[3:], + blobfactory.NestedBlobs( + t, + testfactory.RandomBlobNamespaces(tmrand.NewRand(), 4), + [][]int{{100}, {1000}, {420}, {300}}, + ), + user.SetMemo(largeMemo)) + + // create 1 large sendTx that includes a large memo making the + // transaction over the configured 
max tx size limit + largeSendTx := testutil.SendTxsWithAccounts( + t, testApp, enc, kr, 1000, accounts[0], accounts[1:2], testutil.ChainID, user.SetMemo(largeMemo), ) // create 3 MsgSend transactions that are signed with valid account numbers @@ -80,6 +102,20 @@ func TestProcessProposal(t *testing.T) { ns1 := share.MustNewV0Namespace(bytes.Repeat([]byte{1}, share.NamespaceVersionZeroIDSize)) data := bytes.Repeat([]byte{1}, 13) + tooManyShareBtx := blobfactory.ManyMultiBlobTx( + t, + enc, + kr, + testutil.ChainID, + accounts[3:4], + infos[3:4], + blobfactory.NestedBlobs( + t, + testfactory.RandomBlobNamespaces(tmrand.NewRand(), 4000), + [][]int{repeat(4000, 1)}, + ), + )[0] + type test struct { name string input *tmproto.Data @@ -299,6 +335,39 @@ func TestProcessProposal(t *testing.T) { appVersion: appconsts.LatestVersion, expectedResult: abci.ResponseProcessProposal_REJECT, }, + { + name: "blob tx that takes up too many shares", + input: &tmproto.Data{ + Txs: [][]byte{}, + }, + mutator: func(d *tmproto.Data) { + // this tx will get filtered out by prepare proposal before this + // so we add it here + d.Txs = append(d.Txs, tooManyShareBtx) + }, + appVersion: v3.Version, + expectedResult: abci.ResponseProcessProposal_REJECT, + }, + { + name: "blob txs larger than configured max tx size", + input: validData(), + mutator: func(d *tmproto.Data) { + d.Txs = append(d.Txs, largeBlobTxs...) + d.Hash = calculateNewDataHash(t, d.Txs) + }, + appVersion: appconsts.LatestVersion, + expectedResult: abci.ResponseProcessProposal_REJECT, + }, + { + name: "send tx larger than configured max tx size", + input: validData(), + mutator: func(d *tmproto.Data) { + d.Txs = append(coretypes.Txs(largeSendTx).ToSliceOfBytes(), d.Txs...) + d.Hash = calculateNewDataHash(t, d.Txs) + }, + appVersion: appconsts.LatestVersion, + expectedResult: abci.ResponseProcessProposal_REJECT, + }, } for _, tt := range tests { diff --git a/app/test/std_sdk_test.go b/app/test/std_sdk_test.go index 75eb932b3f..dc54a030d0 100644 --- a/app/test/std_sdk_test.go +++ b/app/test/std_sdk_test.go @@ -8,7 +8,7 @@ import ( "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" "github.com/celestiaorg/celestia-app/v3/app/grpc/tx" - v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/pkg/user" "github.com/celestiaorg/celestia-app/v3/test/util/blobfactory" "github.com/celestiaorg/celestia-app/v3/test/util/testfactory" @@ -307,7 +307,7 @@ func (s *StandardSDKIntegrationTestSuite) TestStandardSDK() { name: "signal a version change", msgFunc: func() (msgs []sdk.Msg, signer string) { valAccount := s.getValidatorAccount() - msg := signal.NewMsgSignalVersion(valAccount, 2) + msg := signal.NewMsgSignalVersion(valAccount, appconsts.LatestVersion+1) return []sdk.Msg{msg}, s.getValidatorName() }, expectedCode: abci.CodeTypeOK, @@ -349,7 +349,7 @@ func (s *StandardSDKIntegrationTestSuite) TestGRPCQueries() { require.NoError(t, err) got, err := resp.NetworkMinGasPrice.Float64() require.NoError(t, err) - assert.Equal(t, v2.NetworkMinGasPrice, got) + assert.Equal(t, appconsts.DefaultNetworkMinGasPrice, got) }) t.Run("testnode can query local min gas price", func(t *testing.T) { serviceClient := nodeservice.NewServiceClient(s.cctx.GRPCClient) diff --git a/app/test/upgrade_test.go b/app/test/upgrade_test.go index 55de8fc486..4c2677c8e0 100644 --- a/app/test/upgrade_test.go +++ b/app/test/upgrade_test.go @@ -1,21 +1,27 
@@ package app_test import ( - "encoding/json" "fmt" "strings" "testing" - "time" app "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" v1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" + "github.com/celestiaorg/celestia-app/v3/pkg/user" "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/celestiaorg/celestia-app/v3/test/util/genesis" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" blobstreamtypes "github.com/celestiaorg/celestia-app/v3/x/blobstream/types" "github.com/celestiaorg/celestia-app/v3/x/minfee" - "github.com/cosmos/cosmos-sdk/crypto/keyring" + signaltypes "github.com/celestiaorg/celestia-app/v3/x/signal/types" + "github.com/celestiaorg/go-square/v2/share" + "github.com/celestiaorg/go-square/v2/tx" sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/x/params/types/proposal" packetforwardtypes "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v6/packetforward/types" icahosttypes "github.com/cosmos/ibc-go/v6/modules/apps/27-interchain-accounts/host/types" @@ -27,10 +33,134 @@ import ( dbm "github.com/tendermint/tm-db" ) -// TestAppUpgrades verifies that the all module's params are overridden during an +func TestAppUpgradeV3(t *testing.T) { + if testing.Short() { + t.Skip("skipping TestAppUpgradeV3 in short mode") + } + + appconsts.OverrideUpgradeHeightDelayStr = "1" + defer func() { appconsts.OverrideUpgradeHeightDelayStr = "" }() + + testApp, genesis := SetupTestAppWithUpgradeHeight(t, 3) + upgradeFromV1ToV2(t, testApp) + + ctx := testApp.NewContext(true, tmproto.Header{}) + validators := testApp.StakingKeeper.GetAllValidators(ctx) + valAddr, err := sdk.ValAddressFromBech32(validators[0].OperatorAddress) + require.NoError(t, err) + record, err := genesis.Keyring().Key(testnode.DefaultValidatorAccountName) + require.NoError(t, err) + accAddr, err := record.GetAddress() + require.NoError(t, err) + encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
+ resp, err := testApp.AccountKeeper.Account(ctx, &authtypes.QueryAccountRequest{ + Address: accAddr.String(), + }) + require.NoError(t, err) + var account authtypes.AccountI + err = encCfg.InterfaceRegistry.UnpackAny(resp.Account, &account) + require.NoError(t, err) + + signer, err := user.NewSigner( + genesis.Keyring(), encCfg.TxConfig, testApp.GetChainID(), v3.Version, + user.NewAccount(testnode.DefaultValidatorAccountName, account.GetAccountNumber(), account.GetSequence()), + ) + require.NoError(t, err) + + upgradeTx, err := signer.CreateTx( + []sdk.Msg{ + signaltypes.NewMsgSignalVersion(valAddr, 3), + signaltypes.NewMsgTryUpgrade(accAddr), + }, + user.SetGasLimitAndGasPrice(100_000, appconsts.DefaultMinGasPrice), + ) + require.NoError(t, err) + testApp.BeginBlock(abci.RequestBeginBlock{ + Header: tmproto.Header{ + ChainID: genesis.ChainID, + Height: 3, + Version: tmversion.Consensus{App: 2}, + }, + }) + + deliverTxResp := testApp.DeliverTx(abci.RequestDeliverTx{ + Tx: upgradeTx, + }) + require.Equal(t, abci.CodeTypeOK, deliverTxResp.Code, deliverTxResp.Log) + + endBlockResp := testApp.EndBlock(abci.RequestEndBlock{ + Height: 3, + }) + require.Equal(t, v2.Version, endBlockResp.ConsensusParamUpdates.Version.AppVersion) + require.Equal(t, appconsts.GetTimeoutCommit(v2.Version), + endBlockResp.Timeouts.TimeoutCommit) + require.Equal(t, appconsts.GetTimeoutPropose(v2.Version), + endBlockResp.Timeouts.TimeoutPropose) + testApp.Commit() + require.NoError(t, signer.IncrementSequence(testnode.DefaultValidatorAccountName)) + + ctx = testApp.NewContext(true, tmproto.Header{}) + getUpgradeResp, err := testApp.SignalKeeper.GetUpgrade(ctx, &signaltypes.QueryGetUpgradeRequest{}) + require.NoError(t, err) + require.Equal(t, v3.Version, getUpgradeResp.Upgrade.AppVersion) + + // brace yourselfs, this part may take a while + initialHeight := int64(4) + for height := initialHeight; height < initialHeight+appconsts.UpgradeHeightDelay(v2.Version); height++ { + appVersion := v2.Version + _ = testApp.BeginBlock(abci.RequestBeginBlock{ + Header: tmproto.Header{ + Height: height, + Version: tmversion.Consensus{App: appVersion}, + }, + }) + + endBlockResp = testApp.EndBlock(abci.RequestEndBlock{ + Height: 3 + appconsts.UpgradeHeightDelay(v2.Version), + }) + + require.Equal(t, appconsts.GetTimeoutCommit(appVersion), endBlockResp.Timeouts.TimeoutCommit) + require.Equal(t, appconsts.GetTimeoutPropose(appVersion), endBlockResp.Timeouts.TimeoutPropose) + + _ = testApp.Commit() + } + require.Equal(t, v3.Version, endBlockResp.ConsensusParamUpdates.Version.AppVersion) + + // confirm that an authored blob tx works + blob, err := share.NewV1Blob(share.RandomBlobNamespace(), []byte("hello world"), accAddr.Bytes()) + require.NoError(t, err) + blobTxBytes, _, err := signer.CreatePayForBlobs( + testnode.DefaultValidatorAccountName, + []*share.Blob{blob}, + user.SetGasLimitAndGasPrice(200_000, appconsts.DefaultMinGasPrice), + ) + require.NoError(t, err) + blobTx, _, err := tx.UnmarshalBlobTx(blobTxBytes) + require.NoError(t, err) + + _ = testApp.BeginBlock(abci.RequestBeginBlock{ + Header: tmproto.Header{ + ChainID: genesis.ChainID, + Height: initialHeight + appconsts.UpgradeHeightDelay(v3.Version), + Version: tmversion.Consensus{App: 3}, + }, + }) + + deliverTxResp = testApp.DeliverTx(abci.RequestDeliverTx{ + Tx: blobTx.Tx, + }) + require.Equal(t, abci.CodeTypeOK, deliverTxResp.Code, deliverTxResp.Log) + + respEndBlock := testApp.EndBlock(abci. 
+ RequestEndBlock{Height: initialHeight + appconsts.UpgradeHeightDelay(v3.Version)}) + require.Equal(t, appconsts.GetTimeoutCommit(v3.Version), respEndBlock.Timeouts.TimeoutCommit) + require.Equal(t, appconsts.GetTimeoutPropose(v3.Version), respEndBlock.Timeouts.TimeoutPropose) +} + +// TestAppUpgradeV2 verifies that the all module's params are overridden during an // upgrade from v1 -> v2 and the app version changes correctly. -func TestAppUpgrades(t *testing.T) { - NetworkMinGasPriceDec, err := sdk.NewDecFromStr(fmt.Sprintf("%f", v2.NetworkMinGasPrice)) +func TestAppUpgradeV2(t *testing.T) { + NetworkMinGasPriceDec, err := sdk.NewDecFromStr(fmt.Sprintf("%f", appconsts.DefaultNetworkMinGasPrice)) require.NoError(t, err) tests := []struct { @@ -122,19 +252,18 @@ func TestBlobstreamRemovedInV2(t *testing.T) { require.Error(t, err) } -func SetupTestAppWithUpgradeHeight(t *testing.T, upgradeHeight int64) (*app.App, keyring.Keyring) { +func SetupTestAppWithUpgradeHeight(t *testing.T, upgradeHeight int64) (*app.App, *genesis.Genesis) { t.Helper() db := dbm.NewMemDB() - chainID := "test_chain" encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) testApp := app.New(log.NewNopLogger(), db, nil, 0, encCfg, upgradeHeight, util.EmptyAppOptions{}) - genesisState, _, kr := util.GenesisStateWithSingleValidator(testApp, "account") - stateBytes, err := json.MarshalIndent(genesisState, "", " ") + genesis := genesis.NewDefaultGenesis(). + WithValidators(genesis.NewDefaultValidator(testnode.DefaultValidatorAccountName)). + WithConsensusParams(app.DefaultInitialConsensusParams()) + genDoc, err := genesis.Export() require.NoError(t, err) - infoResp := testApp.Info(abci.RequestInfo{}) - require.EqualValues(t, 0, infoResp.AppVersion) - cp := app.DefaultInitialConsensusParams() + cp := genDoc.ConsensusParams abciParams := &abci.ConsensusParams{ Block: &abci.BlockParams{ MaxBytes: cp.Block.MaxBytes, @@ -147,23 +276,26 @@ func SetupTestAppWithUpgradeHeight(t *testing.T, upgradeHeight int64) (*app.App, _ = testApp.InitChain( abci.RequestInitChain{ - Time: time.Now(), + Time: genDoc.GenesisTime, Validators: []abci.ValidatorUpdate{}, ConsensusParams: abciParams, - AppStateBytes: stateBytes, - ChainId: chainID, + AppStateBytes: genDoc.AppState, + ChainId: genDoc.ChainID, }, ) // assert that the chain starts with version provided in genesis - infoResp = testApp.Info(abci.RequestInfo{}) - require.EqualValues(t, app.DefaultInitialConsensusParams().Version.AppVersion, infoResp.AppVersion) + infoResp := testApp.Info(abci.RequestInfo{}) + appVersion := app.DefaultInitialConsensusParams().Version.AppVersion + require.EqualValues(t, appVersion, infoResp.AppVersion) + require.EqualValues(t, appconsts.GetTimeoutCommit(appVersion), infoResp.Timeouts.TimeoutCommit) + require.EqualValues(t, appconsts.GetTimeoutPropose(appVersion), infoResp.Timeouts.TimeoutPropose) - supportedVersions := []uint64{v1.Version, v2.Version} + supportedVersions := []uint64{v1.Version, v2.Version, v3.Version} require.Equal(t, supportedVersions, testApp.SupportedVersions()) _ = testApp.Commit() - return testApp, kr + return testApp, genesis } func upgradeFromV1ToV2(t *testing.T, testApp *app.App) { @@ -172,7 +304,11 @@ func upgradeFromV1ToV2(t *testing.T, testApp *app.App) { Height: 2, Version: tmversion.Consensus{App: 1}, }}) - testApp.EndBlock(abci.RequestEndBlock{Height: 2}) + endBlockResp := testApp.EndBlock(abci.RequestEndBlock{Height: 2}) + require.Equal(t, appconsts.GetTimeoutCommit(v1.Version), + endBlockResp.Timeouts.TimeoutCommit) + 
require.Equal(t, appconsts.GetTimeoutPropose(v1.Version), + endBlockResp.Timeouts.TimeoutPropose) testApp.Commit() require.EqualValues(t, 2, testApp.AppVersion()) } diff --git a/app/validate_txs.go b/app/validate_txs.go index 3538e221a0..ee3edfbb6f 100644 --- a/app/validate_txs.go +++ b/app/validate_txs.go @@ -1,6 +1,7 @@ package app import ( + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/go-square/v2/tx" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/telemetry" @@ -44,12 +45,24 @@ func FilterTxs(logger log.Logger, ctx sdk.Context, handler sdk.AnteHandler, txCo // function used to apply the ante handler. func filterStdTxs(logger log.Logger, dec sdk.TxDecoder, ctx sdk.Context, handler sdk.AnteHandler, txs [][]byte) ([][]byte, sdk.Context) { n := 0 + nonPFBMessageCount := 0 for _, tx := range txs { sdkTx, err := dec(tx) if err != nil { logger.Error("decoding already checked transaction", "tx", tmbytes.HexBytes(coretypes.Tx(tx).Hash()), "error", err) continue } + + // Set the tx size on the context before calling the AnteHandler + ctx = ctx.WithTxBytes(tx) + + msgTypes := msgTypes(sdkTx) + if nonPFBMessageCount+len(sdkTx.GetMsgs()) > appconsts.MaxNonPFBMessages { + logger.Debug("skipping tx because the max non PFB message count was reached", "tx", tmbytes.HexBytes(coretypes.Tx(tx).Hash())) + continue + } + nonPFBMessageCount += len(sdkTx.GetMsgs()) + ctx, err = handler(ctx, sdkTx, false) // either the transaction is invalid (ie incorrect nonce) and we // simply want to remove this tx, or we're catching a panic from one @@ -59,7 +72,7 @@ func filterStdTxs(logger log.Logger, dec sdk.TxDecoder, ctx sdk.Context, handler "filtering already checked transaction", "tx", tmbytes.HexBytes(coretypes.Tx(tx).Hash()), "error", err, - "msgs", msgTypes(sdkTx), + "msgs", msgTypes, ) telemetry.IncrCounter(1, "prepare_proposal", "invalid_std_txs") continue @@ -77,12 +90,23 @@ func filterStdTxs(logger log.Logger, dec sdk.TxDecoder, ctx sdk.Context, handler // function used to apply the ante handler. func filterBlobTxs(logger log.Logger, dec sdk.TxDecoder, ctx sdk.Context, handler sdk.AnteHandler, txs []*tx.BlobTx) ([]*tx.BlobTx, sdk.Context) { n := 0 + pfbMessageCount := 0 for _, tx := range txs { sdkTx, err := dec(tx.Tx) if err != nil { logger.Error("decoding already checked blob transaction", "tx", tmbytes.HexBytes(coretypes.Tx(tx.Tx).Hash()), "error", err) continue } + + // Set the tx size on the context before calling the AnteHandler + ctx = ctx.WithTxBytes(tx.Tx) + + if pfbMessageCount+len(sdkTx.GetMsgs()) > appconsts.MaxPFBMessages { + logger.Debug("skipping tx because the max pfb message count was reached", "tx", tmbytes.HexBytes(coretypes.Tx(tx.Tx).Hash())) + continue + } + pfbMessageCount += len(sdkTx.GetMsgs()) + ctx, err = handler(ctx, sdkTx, false) // either the transaction is invalid (ie incorrect nonce) and we // simply want to remove this tx, or we're catching a panic from one diff --git a/cmd/celestia-appd/cmd/start.go b/cmd/celestia-appd/cmd/start.go index 04c64173c3..763f1b6cd5 100644 --- a/cmd/celestia-appd/cmd/start.go +++ b/cmd/celestia-appd/cmd/start.go @@ -4,7 +4,6 @@ package cmd // start command flag. import ( - "errors" "fmt" "io" "net" @@ -566,37 +565,30 @@ func addCommands( ) } -// checkBBR checks is bbr is configured to be used as a congestion control algo. +// checkBBR checks if BBR is enabled. 
func checkBBR(command *cobra.Command) error {
 const (
- errorMsg = `
-// The BBR congestion control algorithm does not appear to be enabled in this
-// system's kernel. This is important for the p2p stack to be performant.
-//
-// to enable bbr call:
-//
+ warning = `
+The BBR (Bottleneck Bandwidth and Round-trip propagation time) congestion control algorithm is not enabled in this system's kernel.
+BBR is important for the performance of the p2p stack.
+
+To enable BBR:
 sudo modprobe tcp_bbr
 net.core.default_qdisc=fq
 net.ipv4.tcp_congestion_control=bbr
 sudo sysctl -p
-//
-// and can be verified to be running using
-//
+
+Then verify BBR is enabled:
 sysctl net.ipv4.tcp_congestion_control
-// This might not work for all systems, you might have to search online to
-// figure out how to enable bbr for your system.
-//
-// While this node will get worse performance using something other than bbr,
-// If you need to bypass this block use the "--force-no-bbr true" flag.
- `
+This node will get worse p2p performance using a different congestion control algorithm.
+If you need to bypass this check, use the --force-no-bbr flag.
+`
 )
- noBBRErr := errors.New(errorMsg)
-
 forceNoBBR, err := command.Flags().GetBool(FlagForceNoBBR)
 if err != nil {
- return noBBRErr
+ return err
 }
 if forceNoBBR {
 return nil
@@ -605,11 +597,13 @@ sysctl net.ipv4.tcp_congestion_control
 cmd := exec.Command("sysctl", "net.ipv4.tcp_congestion_control")
 output, err := cmd.Output()
 if err != nil {
- return err
+ fmt.Print(warning)
+ return fmt.Errorf("failed to execute 'sysctl net.ipv4.tcp_congestion_control': %w", err)
 }
 if !strings.Contains(string(output), "bbr") {
- return noBBRErr
+ fmt.Print(warning)
+ return fmt.Errorf("BBR not enabled because output %v does not contain 'bbr'", string(output))
 }
 return nil
diff --git a/Dockerfile b/docker/Dockerfile
similarity index 82%
rename from Dockerfile
rename to docker/Dockerfile
index 050d45a306..0e3c4bf51d 100644
--- a/Dockerfile
+++ b/docker/Dockerfile
@@ -4,10 +4,16 @@
 #
 # Separating the builder and runtime image allows the runtime image to be
 # considerably smaller because it doesn't need to have Golang installed.
-ARG BUILDER_IMAGE=docker.io/golang:1.22.6-alpine3.19
+ARG BUILDER_IMAGE=docker.io/golang:1.23.1-alpine3.20
 ARG RUNTIME_IMAGE=docker.io/alpine:3.19
 ARG TARGETOS
 ARG TARGETARCH
+# Use build args to override the maximum square size of the docker image, e.g.
+# docker build --build-arg MAX_SQUARE_SIZE=64 -t celestia-app:latest .
+ARG MAX_SQUARE_SIZE
+# Use build args to override the upgrade height delay of the docker image, e.g.
+# docker build --build-arg UPGRADE_HEIGHT_DELAY=1000 -t celestia-app:latest .
+ARG UPGRADE_HEIGHT_DELAY

 # Stage 1: Build the celestia-appd binary inside a builder image that will be discarded later.
 # Ignore hadolint rule because hadolint can't parse the variable.
@@ -28,6 +34,8 @@ COPY . 
/celestia-app WORKDIR /celestia-app RUN uname -a &&\ CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} \ + OVERRIDE_MAX_SQUARE_SIZE=${MAX_SQUARE_SIZE} \ + OVERRIDE_UPGRADE_HEIGHT_DELAY=${UPGRADE_HEIGHT_DELAY} \ make build # Stage 2: Create a minimal image to run the celestia-appd binary diff --git a/docker/txsim.sh b/docker/txsim.sh deleted file mode 100644 index cefe6b57ce..0000000000 --- a/docker/txsim.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -CREATE_KEY=0 -KEY_PATH="/home/celestia" -GRPC_ENDPOINT="" -POLL_TIME="" -BLOB=0 -BLOB_AMOUNTS="1" -BLOB_SIZES="100-1000" -KEY_MNEMONIC="" -SEED=0 -SEND=0 -SEND_AMOUNT=1000 -SEND_ITERATIONS=1000 -STAKE=0 -STAKE_VALUE=1000 - -while getopts "k:p:f:g:t:b:a:s:m:d:e:i:v:u:w:" opt; do - case ${opt} in - k ) - CREATE_KEY=$OPTARG - ;; - p ) - KEY_PATH=$OPTARG - ;; - g ) - GRPC_ENDPOINT=$OPTARG - ;; - t ) - POLL_TIME=$OPTARG - ;; - b ) - BLOB=$OPTARG - ;; - a ) - BLOB_AMOUNTS=$OPTARG - ;; - s ) - BLOB_SIZES=$OPTARG - ;; - m ) - KEY_MNEMONIC=$OPTARG - ;; - d ) - SEED=$OPTARG - ;; - e ) - SEND=$OPTARG - ;; - i ) - SEND_AMOUNT=$OPTARG - ;; - v ) - SEND_ITERATIONS=$OPTARG - ;; - u ) - STAKE=$OPTARG - ;; - w ) - STAKE_VALUE=$OPTARG - ;; - \? ) - echo "Invalid option: $OPTARG" 1>&2 - exit 1 - ;; - : ) - echo "Invalid option: $OPTARG requires an argument" 1>&2 - exit 1 - ;; - esac -done -shift $((OPTIND -1)) - -if [ "$CREATE_KEY" -eq 1 ]; then - echo "Creating a new keyring-test for the txsim" - /bin/celestia-appd keys add sim --keyring-backend test --home $KEY_PATH - sleep 5 -fi - -# Running a tx simulator -txsim --key-path $KEY_PATH \ - --grpc-endpoint $GRPC_ENDPOINT \ - --poll-time $POLL_TIME \ - --blob $BLOB \ - --blob-amounts $BLOB_AMOUNTS \ - --blob-sizes $BLOB_SIZES \ - --key-mnemonic "$KEY_MNEMONIC" \ - --seed $SEED \ - --send $SEND \ - --send-amount $SEND_AMOUNT \ - --send-iterations $SEND_ITERATIONS \ - --stake $STAKE \ - --stake-value $STAKE_VALUE \ - --ignore-failures diff --git a/docker/Dockerfile_txsim b/docker/txsim/Dockerfile similarity index 70% rename from docker/Dockerfile_txsim rename to docker/txsim/Dockerfile index 471db9c76a..563a06ab09 100644 --- a/docker/Dockerfile_txsim +++ b/docker/txsim/Dockerfile @@ -1,5 +1,5 @@ -# Stage 1: generate celestia-appd binary -FROM --platform=$BUILDPLATFORM docker.io/golang:1.22.6-alpine3.19 as builder +# Stage 1: generate txsim binary +FROM --platform=$BUILDPLATFORM docker.io/golang:1.23.1-alpine3.20 as builder ARG TARGETOS ARG TARGETARCH @@ -16,11 +16,10 @@ RUN apk update && apk add --no-cache \ musl-dev COPY . /celestia-app WORKDIR /celestia-app -# we need the celestia-appd build as we might want to create an account -# internally for txsimulation + RUN uname -a &&\ CGO_ENABLED=${CGO_ENABLED} GOOS=${TARGETOS} GOARCH=${TARGETARCH} \ - make build && make txsim-build + make txsim-build # Stage 2: create a minimal image with the binary FROM docker.io/alpine:3.20 @@ -45,18 +44,14 @@ RUN apk update && apk add --no-cache \ -s /sbin/nologin \ -u ${UID} -# Copy in the celestia-appd binary -COPY --from=builder /celestia-app/build/celestia-appd /bin/celestia-appd +# Copy in the txsim binary COPY --from=builder /celestia-app/build/txsim /bin/txsim -COPY --chown=${USER_NAME}:${USER_NAME} docker/txsim.sh /opt/entrypoint.sh +COPY --chown=${USER_NAME}:${USER_NAME} docker/txsim/entrypoint.sh /opt/entrypoint.sh USER ${USER_NAME} # Set the working directory to the home directory. 
WORKDIR ${CELESTIA_HOME}

-# grpc, rpc, api ports
-EXPOSE 26657 1317 9090
-
 ENTRYPOINT [ "/bin/bash", "/opt/entrypoint.sh" ]
diff --git a/docker/README.md b/docker/txsim/README.md
similarity index 100%
rename from docker/README.md
rename to docker/txsim/README.md
diff --git a/docker/txsim/entrypoint.sh b/docker/txsim/entrypoint.sh
new file mode 100644
index 0000000000..f919a4d33d
--- /dev/null
+++ b/docker/txsim/entrypoint.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+echo "Starting txsim with command:"
+echo "/bin/txsim $*"
+echo ""
+
+exec /bin/txsim "$@"
diff --git a/docs/audit/informal-systems-authored-blobs.pdf b/docs/audit/informal-systems-authored-blobs.pdf
new file mode 100644
index 0000000000..b2ac02776a
Binary files /dev/null and b/docs/audit/informal-systems-authored-blobs.pdf differ
diff --git a/docs/maintainers/docker.md b/docs/maintainers/docker.md
new file mode 100644
index 0000000000..37dfff2d9e
--- /dev/null
+++ b/docs/maintainers/docker.md
@@ -0,0 +1,29 @@
+# Docker
+
+## Context
+
+GitHub Actions should automatically build and publish a Docker image for each release. If GitHub Actions fails, you can manually build and publish a Docker image using this guide.
+
+## Prerequisites
+
+1. Generate a new GitHub personal access token with the `write:packages` scope.
+
+## Steps
+
+1. Verify that a Docker image with the correct tag doesn't already exist for the release you're trying to publish on [GHCR](https://github.com/celestiaorg/celestia-app/pkgs/container/celestia-app/versions).
+
+1. In a new terminal:
+
+    ```shell
+    # Set the CR_PAT environment variable to your token
+    export CR_PAT=YOUR_TOKEN
+    # Login to the GitHub Container Registry
+    echo $CR_PAT | docker login ghcr.io -u USERNAME --password-stdin
+
+    # Tell docker to use buildx for multi-platform support
+    docker buildx create --use
+    # Build the image, in this example the v2.2.0-mocha image
+    docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/celestiaorg/celestia-app:v2.2.0-mocha --push .
+    ```
+
+1. Verify that a Docker image with the correct tag was published on [GHCR](https://github.com/celestiaorg/celestia-app/pkgs/container/celestia-app/versions).
diff --git a/docs/maintainers/release-guide.md b/docs/maintainers/release-guide.md
index 7260e95192..698799235e 100644
--- a/docs/maintainers/release-guide.md
+++ b/docs/maintainers/release-guide.md
@@ -27,10 +27,12 @@ The target audience for this guide is maintainers of this repo. In general, the
 ## Official Release

-Follow the [creating a release candidate](#creating-a-release-candidate) section with the following considerations:
+Follow the [creating a release candidate](#creating-a-release-candidate) section with the following considerations:

 - The version tag should not include the `-rc` suffix.
+- If the release targets a testnet, suffix the release with `-arabica` or `-mocha` (e.g. `v2.2.0-mocha`).
 - The release notes should contain an **Upgrade Notice** section with notable changes for node operators or library consumers.
+- The release notes should also contain a link to https://github.com/celestiaorg/celestia-app/blob/main/docs/release-notes/release-notes.md, where breaking changes are captured.

 After creating the release:
diff --git a/docs/release-notes/pending-release.md b/docs/release-notes/pending-release.md
index 80c7c228eb..c7e9198b3a 100644
--- a/docs/release-notes/pending-release.md
+++ b/docs/release-notes/pending-release.md
@@ -1,11 +1,7 @@
-# Pending Nelease
+# Pending Release

-## v3.0.0 [Unreleased]
+## v4.0.0

-### Node Operators
+### Node Operators (v4.0.0)

-- Consensus node operators should enable the BBR (Bottleneck Bandwidth and Round-trip propagation time) congestion control algorithm. See [#3774](https://github.com/celestiaorg/celestia-app/pull/3774).
-
-### Library Consumers
-
-- Namespace and share constants in the `appconsts` package were moved to [celestiaorg/go-square](https://github.com/celestiaorg/go-square). See [#3765](https://github.com/celestiaorg/celestia-app/pull/3765).
+### Library Consumers (v4.0.0)
diff --git a/docs/release-notes/release-notes.md b/docs/release-notes/release-notes.md
index 5cc49d1010..49f46b68d4 100644
--- a/docs/release-notes/release-notes.md
+++ b/docs/release-notes/release-notes.md
@@ -2,17 +2,58 @@
 This guide provides notes for major version releases. These notes may be helpful for users when upgrading from previous major versions.

+## v3.0.0
+
+### Node Operators (v3.0.0)
+
+- Consensus node operators must enable the BBR (Bottleneck Bandwidth and Round-trip propagation time) congestion control algorithm. See [#3774](https://github.com/celestiaorg/celestia-app/pull/3774).
+  - If using Linux in Docker, Kubernetes, a VM, or on bare metal, this can be done by running the `make enable-bbr` command on the host machine.
+- Consensus node operators should manually configure the mempool setting `ttl-num-blocks = 12` in config.toml. An example command to do this:
+
+  ```bash
+  sed -i 's/ttl-num-blocks = 5/ttl-num-blocks = 12/' ~/.celestia-app/config/config.toml
+  ```
+
+- Upgrades now use the `x/signal` module to coordinate the network to an upgrade height.
+
+If you are a validator in the active set, you can use the following command to signal readiness to upgrade to v3:
+
+```bash
+celestia-appd tx signal signal 3
+```
+
+You can track the tally of validator signalling using the following query:
+
+```bash
+celestia-appd query signal tally 3
+```
+
+Once more than 5/6 of the voting power has signalled, the upgrade will be ready. There is a hard-coded delay between confirmation of the upgrade and the switch to the new state machine.
+
+To view the upcoming upgrade height, use the following query:
+
+```bash
+celestia-appd query signal upgrade
+> An upgrade is pending to app version 3 at height 2348907.
+```
+
+For more information, refer to the module [docs](../../x/signal/README.md).
+
 ## [v2.0.0](https://github.com/celestiaorg/celestia-app/releases/tag/v2.0.0)

-### Node Operators
+### Node Operators (v2.0.0)

 If you are a consensus node operator, please follow the communication channels listed under [network upgrades](https://docs.celestia.org/nodes/participate#network-upgrades) to learn when this release is recommended for each network (e.g. Mocha, Mainnet Beta).
-Consensus node operators are expected to upgrade to this release _prior_ to the Lemongrass hardfork if they intend to continue participating in the network. The command used to start the [consensus node](https://docs.celestia.org/nodes/consensus-node#start-the-consensus-node) or [validator node](https://docs.celestia.org/nodes/validator-node#run-the-validator-node) will accept an additional `--v2-upgrade-height` flag. See [this table](https://docs.celestia.org/nodes/hardfork-process#lemongrass-hardfork) for upgrade heights for each network. +Consensus node operators are expected to upgrade to this release _prior_ to the Lemongrass hardfork if they intend to continue participating in the network. The command used to start the [consensus node](https://docs.celestia.org/nodes/consensus-node#start-the-consensus-node) or [validator node](https://docs.celestia.org/nodes/validator-node#run-the-validator-node) will accept an additional `--v2-upgrade-height` flag. See [this table](https://docs.celestia.org/nodes/network-upgrade-process#lemongrass-network-upgrade) for upgrade heights for each network. Consensus node operators should enable the BBR (Bottleneck Bandwidth and Round-trip propagation time) congestion control algorithm. See [#3812](https://github.com/celestiaorg/celestia-app/pull/3812). -### Library Consumers +### Library Consumers (v2.0.0) If you are a library consumer, a number of the Go APIs have changed since celestia-app v1.x.x. Some of the notable changes are: diff --git a/go.mod b/go.mod index dae6cbfdc0..15341387e8 100644 --- a/go.mod +++ b/go.mod @@ -2,22 +2,22 @@ module github.com/celestiaorg/celestia-app/v3 go 1.23.1 -toolchain go1.23.2 - require ( cosmossdk.io/errors v1.0.1 cosmossdk.io/math v1.3.0 github.com/celestiaorg/blobstream-contracts/v3 v3.1.0 - github.com/celestiaorg/go-square/v2 v2.0.0-rc2 - github.com/celestiaorg/knuu v0.14.0 + github.com/celestiaorg/go-square v1.1.1 + github.com/celestiaorg/go-square/v2 v2.0.0 + github.com/celestiaorg/knuu v0.16.1 github.com/celestiaorg/nmt v0.22.2 github.com/celestiaorg/rsmt2d v0.14.0 + github.com/cometbft/cometbft-db v1.0.1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 github.com/cosmos/cosmos-sdk v0.46.16 github.com/cosmos/gogoproto v1.7.0 github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v6 v6.1.2 github.com/cosmos/ibc-go/v6 v6.2.2 - github.com/ethereum/go-ethereum v1.14.7 + github.com/ethereum/go-ethereum v1.14.11 github.com/gogo/protobuf v1.3.3 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 @@ -32,21 +32,23 @@ require ( github.com/tendermint/tendermint v0.34.29 github.com/tendermint/tm-db v0.6.7 golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e - google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 - google.golang.org/grpc v1.66.0 - google.golang.org/protobuf v1.34.2 + google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 + google.golang.org/grpc v1.67.1 + google.golang.org/protobuf v1.35.1 gopkg.in/yaml.v2 v2.4.0 + k8s.io/apimachinery v0.31.1 ) require ( cloud.google.com/go v0.112.1 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect cloud.google.com/go/iam v1.1.6 // indirect cloud.google.com/go/storage v1.38.0 // indirect filippo.io/edwards25519 v1.0.0-rc.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + 
github.com/DataDog/zstd v1.5.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/Workiva/go-datastructures v1.0.53 // indirect @@ -55,27 +57,34 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/bufbuild/protocompile v0.14.1 // indirect github.com/celestiaorg/bittwister v0.0.0-20231213180407-65cdbaf5b8c7 // indirect github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/cilium/ebpf v0.12.3 // indirect github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v1.1.2 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect - github.com/cometbft/cometbft-db v0.7.0 // indirect github.com/confio/ics23/go v0.9.1 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/containerd/continuity v0.4.2 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gorocksdb v1.2.0 // indirect github.com/cosmos/iavl v0.19.6 // indirect github.com/cosmos/ledger-cosmos-go v0.13.2 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -83,18 +92,18 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgraph-io/badger/v4 v4.3.0 // indirect + github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/distribution/reference v0.5.0 // indirect - github.com/docker/docker v26.1.5+incompatible // indirect - github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect - github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/ethereum/c-kzg-4844 v1.0.0 // indirect + github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-ini/ini v1.67.0 // 
indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect @@ -104,15 +113,14 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-playground/validator/v10 v10.11.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/gateway v1.1.0 // indirect - github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/flatbuffers v1.12.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -134,32 +142,32 @@ require ( github.com/hashicorp/go-getter v1.7.4 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 // indirect - github.com/holiman/uint256 v1.3.0 // indirect + github.com/holiman/uint256 v1.3.1 // indirect github.com/iancoleman/orderedmap v0.2.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/joho/godotenv v1.5.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/klauspost/reedsolomon v1.12.1 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/linxGnu/grocksdb v1.9.3 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/minio/md5-simd v1.1.2 // indirect @@ -168,15 +176,14 @@ require ( github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
+ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/onsi/ginkgo v1.16.5 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -188,6 +195,7 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/regen-network/cosmos-proto v0.3.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/cors v1.8.3 // indirect github.com/rs/xid v1.5.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect @@ -197,33 +205,30 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.15.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/supranational/blst v0.3.11 // indirect + github.com/supranational/blst v0.3.13 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tendermint/go-amino v0.16.0 // indirect github.com/tidwall/btree v1.5.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect - github.com/ugorji/go/codec v1.2.9 // indirect github.com/ulikunitz/xz v0.5.10 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect - go.etcd.io/bbolt v1.3.6 // indirect + go.etcd.io/bbolt v1.3.11 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.30.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 // indirect go.opentelemetry.io/otel/metric v1.30.0 // indirect go.opentelemetry.io/otel/sdk v1.30.0 // indirect go.opentelemetry.io/otel/trace v1.30.0 // indirect - go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.27.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.24.0 // indirect @@ -235,13 +240,12 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.28.2 // indirect - k8s.io/apimachinery v0.28.2 // indirect - k8s.io/client-go v0.28.2 // indirect - k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/api v0.30.2 // indirect + k8s.io/client-go v0.30.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - nhooyr.io/websocket v1.8.6 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + nhooyr.io/websocket v1.8.17 // indirect rsc.io/tmplfunc v0.0.3 // indirect sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect @@ -249,7 +253,7 @@ require ( ) replace ( - github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.24.1-sdk-v0.46.16 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v1.25.0-sdk-v0.46.16 // Pin to ledger-cosmos-go v0.12.4 to avoid a breaking change introduced in v0.13.0 // The following replace statement can be removed when we upgrade to cosmos-sdk >= v0.50.0 github.com/cosmos/ledger-cosmos-go => github.com/cosmos/ledger-cosmos-go v0.12.4 diff --git a/go.sum b/go.sum index 8bfade833c..c82543a946 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= @@ -213,7 +213,6 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -280,8 +279,8 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1U github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= -github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod 
h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= @@ -320,12 +319,14 @@ github.com/celestiaorg/blobstream-contracts/v3 v3.1.0 h1:h1Y4V3EMQ2mFmNtWt2sIhZI github.com/celestiaorg/blobstream-contracts/v3 v3.1.0/go.mod h1:x4DKyfKOSv1ZJM9NwV+Pw01kH2CD7N5zTFclXIVJ6GQ= github.com/celestiaorg/celestia-core v1.43.0-tm-v0.34.35.0.20241028112242-a31a8fe76311 h1:h5DHE1WwvQnbP9u6REwZN6TAEWtQTomEnqy4Yl90DLw= github.com/celestiaorg/celestia-core v1.43.0-tm-v0.34.35.0.20241028112242-a31a8fe76311/go.mod h1:bFr0lAGwaJ0mOHSBmib5/ca5pbBf1yKWGPs93Td0HPw= -github.com/celestiaorg/cosmos-sdk v1.24.1-sdk-v0.46.16 h1:SeQ7Y/CyOcUMKo7mQiexaj/pZ/xIgyuZFIwYZwpSkWE= -github.com/celestiaorg/cosmos-sdk v1.24.1-sdk-v0.46.16/go.mod h1:Bpl1LSWiDpQumgOhhMTZBMopqa0j7fRasIhvTZB44P0= -github.com/celestiaorg/go-square/v2 v2.0.0-rc2 h1:4D+ASgZGYVCsffc2uhPagACrvNiLZu9/CqNYvnlHCgg= -github.com/celestiaorg/go-square/v2 v2.0.0-rc2/go.mod h1:eeaU8f8jBpk3ZS/gaDZIlTesJR2F51QAmveNzWH6aEU= -github.com/celestiaorg/knuu v0.14.0 h1:96uaDHTzlTfhDLrAiygq9Ewow7UzOzGAbUvMwws1S4A= -github.com/celestiaorg/knuu v0.14.0/go.mod h1:5x/+tlLebBSfLmmSBm2ps6aLjnKLn5bOaZpUfI5FpsA= +github.com/celestiaorg/cosmos-sdk v1.25.0-sdk-v0.46.16 h1:f+fTe7GGk0/qgdzyqB8kk8EcDf9d6MC22khBTQiDXsU= +github.com/celestiaorg/cosmos-sdk v1.25.0-sdk-v0.46.16/go.mod h1:07Z8HJqS8Rw4XlZ+ok3D3NM/X/in8mvcGLvl0Zb5wrA= +github.com/celestiaorg/go-square v1.1.1 h1:Cy3p8WVspVcyOqHM8BWFuuYPwMitO1pYGe+ImILFZRA= +github.com/celestiaorg/go-square v1.1.1/go.mod h1:1EXMErhDrWJM8B8V9hN7dqJ2kUTClfwdqMOmF9yQUa0= +github.com/celestiaorg/go-square/v2 v2.0.0 h1:U5QV8/de5lc7glosfgyHhcxbFwNuwU4+6aYZ2RgjM04= +github.com/celestiaorg/go-square/v2 v2.0.0/go.mod h1:y0BolG0tRM7UN1sAQyDDUkT+aMJPwFIjviVvnCB62C0= +github.com/celestiaorg/knuu v0.16.1 h1:EOR/c9kvc0jZet/mma2qwAdlvEbl94bW9cC8FItkyBE= +github.com/celestiaorg/knuu v0.16.1/go.mod h1:y20nUmVWVgbzxBKHqmbwp3C0ZJ9J9ovCg1ylHo85hdQ= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= github.com/celestiaorg/nmt v0.22.2 h1:JmOMtZL9zWAed1hiwb9DDs+ELcKp/ZQZ3rPverge/V8= @@ -335,8 +336,8 @@ github.com/celestiaorg/rsmt2d v0.14.0/go.mod h1:4kxqiTdFev49sGiKXTDjohbWYOG5GlcI github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= @@ -374,14 +375,16 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= 
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= -github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= +github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= @@ -390,8 +393,8 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI= github.com/coinbase/rosetta-sdk-go v0.7.9 h1:lqllBjMnazTjIqYrOGv8h8jxjg9+hJazIGZr9ZvoCcA= github.com/coinbase/rosetta-sdk-go v0.7.9/go.mod h1:0/knutI7XGVqXmmH4OQD8OckFrbQ8yMsUZTG7FXCR2M= -github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= -github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/cometbft/cometbft-db v1.0.1 h1:SylKuLseMLQKw3+i8y8KozZyJcQSL98qEe2CGMCGTYE= +github.com/cometbft/cometbft-db v1.0.1/go.mod h1:EBrFs1GDRiTqrWXYi4v90Awf/gcdD5ExzdPbg4X8+mk= github.com/confio/ics23/go v0.9.1 h1:3MV46eeWwO3xCauKyAtuAdJYMyPnnchW4iLr2bTw6/U= github.com/confio/ics23/go v0.9.1/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= @@ -402,10 +405,8 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1 github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0= github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= 
+github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -468,24 +469,22 @@ github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/badger/v4 v4.3.0 h1:lcsCE1/1qrRhqP+zYx6xDZb8n7U+QlwNicpc676Ub40= +github.com/dgraph-io/badger/v4 v4.3.0/go.mod h1:Sc0T595g8zqAQRDf44n+z3wG4BOqLwceaFntt8KPxUM= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91 h1:Pux6+xANi0I7RRo5E1gflI4EZ2yx3BGZ75JkAIvGEOA= +github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91/go.mod h1:swkazRqnUf1N62d0Nutz7KIj2UKqsm/H8tD0nBJAXqM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= -github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -519,10 +518,10 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/ethereum/c-kzg-4844 
v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.10.17/go.mod h1:Lt5WzjM07XlXc95YzrhosmR4J9Ahd6X2wyEV2SvGhk0= -github.com/ethereum/go-ethereum v1.14.7 h1:EHpv3dE8evQmpVEQ/Ne2ahB06n2mQptdwqaMNhAT29g= -github.com/ethereum/go-ethereum v1.14.7/go.mod h1:Mq0biU2jbdmKSZoqOj29017ygFrMnB5/Rifwp980W4o= -github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= -github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= +github.com/ethereum/go-ethereum v1.14.11 h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY= +github.com/ethereum/go-ethereum v1.14.11/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E= +github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A= +github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -536,8 +535,6 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= -github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -549,6 +546,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= @@ -556,14 +555,13 @@ github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0 
h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= -github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -597,30 +595,23 @@ github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaL github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= -github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= -github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.11.2 h1:q3SHpufmypg+erIExEKUmsgmhDTyhcJ38oeKGACXohU= -github.com/go-playground/validator/v10 v10.11.2/go.mod h1:NieE624vt4SCTJtD87arVLvdmjPAeV8BQlHtMnw9D7s= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig 
v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= @@ -639,8 +630,6 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -685,9 +674,11 @@ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -735,8 +726,8 @@ github.com/google/pprof 
v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -795,8 +786,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -827,9 +816,8 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -851,8 +839,8 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8 github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/holiman/uint256 v1.3.0 h1:4wdcm/tnd0xXdu7iS3ruNvxkWwrb4aeBQv19ayYn8F4= -github.com/holiman/uint256 
v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= +github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= @@ -896,8 +884,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= -github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= -github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -955,8 +941,6 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -964,6 +948,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linxGnu/grocksdb v1.9.3 h1:s1cbPcOd0cU2SKXRG1nEqCOWYAELQjdqg3RVI2MH9ik= +github.com/linxGnu/grocksdb v1.9.3/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -997,8 +983,8 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -1032,12 +1018,8 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1046,8 +1028,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= @@ -1057,6 +1037,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil 
v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= @@ -1083,8 +1065,8 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -1094,8 +1076,8 @@ github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= -github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1128,6 +1110,9 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -1183,13 +1168,13 @@ github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNw github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= 
github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ -1218,7 +1203,6 @@ github.com/shirou/gopsutil v3.21.6+incompatible h1:mmZtAlWSd8U2HeRTjswbnDLPxqsEo github.com/shirou/gopsutil v3.21.6+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -1278,12 +1262,10 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk= +github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu8= @@ -1318,8 +1300,6 @@ github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3C 
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.9 h1:rmenucSohSTiyL09Y+l2OCk+FrMxGMzho2+tjr5ticU= -github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1334,6 +1314,8 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+ github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1356,8 +1338,8 @@ gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1377,10 +1359,6 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1: go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 
h1:hSWWvDjXHVLq9DkmB+77fl8v7+t+yYiS+eNkiplDK54= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0/go.mod h1:zG7KQql1WjZCaUJd+L/ReSYx4bjbYJxg5ws9ws+mYes= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= @@ -1393,8 +1371,6 @@ go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+ go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1543,8 +1519,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1570,8 +1546,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1642,7 +1618,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1691,7 +1666,6 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1792,8 +1766,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1979,8 +1953,8 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ 
-2025,8 +1999,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2043,8 +2017,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2084,10 +2058,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2097,20 +2068,21 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= -k8s.io/api v0.28.2/go.mod 
h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= -k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= -k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= -k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= -k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI= +k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= +k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/go.work.example b/go.work.example index 85b25f4976..64b0c2f5d2 100644 --- a/go.work.example +++ b/go.work.example @@ -1,4 +1,4 @@ -go 1.22.6 +go 1.23.1 use ( . diff --git a/local_devnet/docker-compose.yml b/local_devnet/docker-compose.yml index 15c0a38704..1cf768d53d 100644 --- a/local_devnet/docker-compose.yml +++ b/local_devnet/docker-compose.yml @@ -6,6 +6,7 @@ services: container_name: core0 build: context: .. + dockerfile: ./docker/Dockerfile expose: - "26660" # for prometheus ports: @@ -31,6 +32,7 @@ services: container_name: core1 build: context: .. + dockerfile: ./docker/Dockerfile expose: - "26660" # for prometheus depends_on: @@ -59,6 +61,7 @@ services: container_name: core2 build: context: .. + dockerfile: ./docker/Dockerfile expose: - "26660" # for prometheus depends_on: @@ -87,6 +90,7 @@ services: container_name: core3 build: context: .. 
+ dockerfile: ./docker/Dockerfile expose: - "26660" # for prometheus depends_on: diff --git a/local_devnet/scripts/start_core0.sh b/local_devnet/scripts/start_core0.sh index 1218ee1386..ee01080ac2 100644 --- a/local_devnet/scripts/start_core0.sh +++ b/local_devnet/scripts/start_core0.sh @@ -17,4 +17,5 @@ fi /bin/celestia-appd start \ --moniker core0 \ --rpc.laddr tcp://0.0.0.0:26657 \ - --home /opt + --home /opt \ + --force-no-bbr diff --git a/local_devnet/scripts/start_node_and_create_validator.sh b/local_devnet/scripts/start_node_and_create_validator.sh index c9ac25f3bb..2b29953666 100644 --- a/local_devnet/scripts/start_node_and_create_validator.sh +++ b/local_devnet/scripts/start_node_and_create_validator.sh @@ -66,7 +66,8 @@ fi # start node celestia-appd start \ ---home="${CELESTIA_HOME}" \ ---moniker="${MONIKER}" \ ---p2p.persistent_peers=e3c592c0c2ad4b05cef3791456b0d6dd4da72ed2@core0:26656 \ ---rpc.laddr=tcp://0.0.0.0:26657 + --home="${CELESTIA_HOME}" \ + --moniker="${MONIKER}" \ + --p2p.persistent_peers=e3c592c0c2ad4b05cef3791456b0d6dd4da72ed2@core0:26656 \ + --rpc.laddr=tcp://0.0.0.0:26657 \ + --force-no-bbr diff --git a/pkg/appconsts/consensus_consts.go b/pkg/appconsts/consensus_consts.go index c26e4078eb..43aa335f93 100644 --- a/pkg/appconsts/consensus_consts.go +++ b/pkg/appconsts/consensus_consts.go @@ -3,8 +3,6 @@ package appconsts import "time" const ( - TimeoutPropose = time.Second * 10 - TimeoutCommit = time.Second * 11 // GoalBlockTime is the target time interval between blocks. Since the block // interval isn't enforced at consensus, the real block interval isn't // guaranteed to exactly match GoalBlockTime. GoalBlockTime is currently targeted diff --git a/pkg/appconsts/global_consts.go b/pkg/appconsts/global_consts.go index 340f651840..1bcdb7ac6c 100644 --- a/pkg/appconsts/global_consts.go +++ b/pkg/appconsts/global_consts.go @@ -9,7 +9,7 @@ import ( // These constants were originally sourced from: // https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/consensus.md#constants // -// They can not change throughout the lifetime of a network. +// They cannot change throughout the lifetime of a network. const ( // DefaultShareVersion is the defacto share version. Use this if you are // unsure of which version to use. @@ -18,7 +18,7 @@ const ( // MinSquareSize is the smallest original square width. MinSquareSize = 1 - // MinshareCount is the minimum number of shares allowed in the original + // MinShareCount is the minimum number of shares allowed in the original // data square. MinShareCount = MinSquareSize * MinSquareSize diff --git a/pkg/appconsts/initial_consts.go b/pkg/appconsts/initial_consts.go index 1f97adf951..18cafc969d 100644 --- a/pkg/appconsts/initial_consts.go +++ b/pkg/appconsts/initial_consts.go @@ -17,10 +17,6 @@ const ( // maximum number of bytes allowed in a valid block. DefaultMaxBytes = DefaultGovMaxSquareSize * DefaultGovMaxSquareSize * share.ContinuationSparseShareContentSize - // DefaultGasPerBlobByte is the default gas cost deducted per byte of blob - // included in a PayForBlobs txn - DefaultGasPerBlobByte = 8 - // DefaultMinGasPrice is the default min gas price that gets set in the app.toml file. // The min gas price acts as a filter. Transactions below that limit will not pass // a nodes `CheckTx` and thus not be proposed by that node. @@ -30,6 +26,11 @@ const ( // to unbond in a proof of stake system. Any validator within this // time can be subject to slashing under conditions of misbehavior. 
DefaultUnbondingTime = 3 * 7 * 24 * time.Hour + + // DefaultNetworkMinGasPrice is used by x/minfee to prevent transactions from being + // included in a block if they specify a gas price lower than this. + // Only applies to app version >= 2 + DefaultNetworkMinGasPrice = 0.000001 // utia ) var DefaultUpperBoundMaxBytes = DefaultSquareSizeUpperBound * DefaultSquareSizeUpperBound * share.ContinuationSparseShareContentSize diff --git a/pkg/appconsts/overrides.go b/pkg/appconsts/overrides.go new file mode 100644 index 0000000000..39c5f4c6d0 --- /dev/null +++ b/pkg/appconsts/overrides.go @@ -0,0 +1,10 @@ +package appconsts + +// Set of values that can be overridden at compile time to modify the behavior of the app. +// WARNING: This should only be modified for testing purposes. All nodes in a network +// must have the same values for these constants. +// Look at the Makefile to see how these are set. +var ( + OverrideSquareSizeUpperBoundStr string + OverrideUpgradeHeightDelayStr string +) diff --git a/pkg/appconsts/prepare_proposal_consts.go b/pkg/appconsts/prepare_proposal_consts.go new file mode 100644 index 0000000000..c933aabd68 --- /dev/null +++ b/pkg/appconsts/prepare_proposal_consts.go @@ -0,0 +1,15 @@ +//go:build !bench_abci_methods + +package appconsts + +// The following consts are not consensus breaking and will be applied straight +// after this binary is started. +// These numbers softly constrain the processing time of blocks to 0.25sec. +// The benchmarks used to find these limits can be found in `app/benchmarks`. +const ( + // MaxPFBMessages is the maximum number of PFB messages a block can contain. + MaxPFBMessages = 200 + + // MaxNonPFBMessages is the maximum number of SDK messages, aside from PFBs, that a block can contain. + MaxNonPFBMessages = 600 +) diff --git a/pkg/appconsts/prepare_proposal_consts_bench.go b/pkg/appconsts/prepare_proposal_consts_bench.go new file mode 100644 index 0000000000..cc8141f791 --- /dev/null +++ b/pkg/appconsts/prepare_proposal_consts_bench.go @@ -0,0 +1,14 @@ +//go:build bench_abci_methods + +package appconsts + +// Note: these constants are set to these values only when running `bench_abci_methods` benchmarks. +// For the production values, check prepare_proposal_consts.go file. + +const ( + // MaxPFBMessages is set to an arbitrarily high number for running benchmarks. + MaxPFBMessages = 999999999999 + + // MaxNonPFBMessages is set to an arbitrarily high number for running benchmarks. + MaxNonPFBMessages = 999999999999 +) diff --git a/pkg/appconsts/v1/app_consts.go b/pkg/appconsts/v1/app_consts.go index 72b040f819..873d3ec18a 100644 --- a/pkg/appconsts/v1/app_consts.go +++ b/pkg/appconsts/v1/app_consts.go @@ -1,7 +1,15 @@ package v1 +import "time" + const ( Version uint64 = 1 SquareSizeUpperBound int = 128 SubtreeRootThreshold int = 64 + TimeoutPropose = time.Second * 10 + TimeoutCommit = time.Second * 11 + // UpgradeHeightDelay is the number of blocks after a quorum has been + // reached that the chain should upgrade to the new version. Assuming a block + // interval of 12 seconds, this is 7 days. + UpgradeHeightDelay = int64(7 * 24 * 60 * 60 / 12) // 7 days * 24 hours * 60 minutes * 60 seconds / 12 seconds per block = 50,400 blocks.
) diff --git a/pkg/appconsts/v2/app_consts.go b/pkg/appconsts/v2/app_consts.go index 2ef7a4075c..d02a97079b 100644 --- a/pkg/appconsts/v2/app_consts.go +++ b/pkg/appconsts/v2/app_consts.go @@ -1,10 +1,15 @@ package v2 +import "time" + const ( Version uint64 = 2 SquareSizeUpperBound int = 128 SubtreeRootThreshold int = 64 - // NetworkMinGasPrice is used by x/minfee to prevent transactions from being - // included in a block if they specify a gas price lower than this. - NetworkMinGasPrice float64 = 0.000001 // utia + TimeoutPropose = time.Second * 10 + TimeoutCommit = time.Second * 11 + // UpgradeHeightDelay is the number of blocks after a quorum has been + // reached that the chain should upgrade to the new version. Assuming a block + // interval of 12 seconds, this is 7 days. + UpgradeHeightDelay = int64(7 * 24 * 60 * 60 / 12) // 7 days * 24 hours * 60 minutes * 60 seconds / 12 seconds per block = 50,400 blocks. ) diff --git a/pkg/appconsts/v3/app_consts.go b/pkg/appconsts/v3/app_consts.go new file mode 100644 index 0000000000..3f9279518d --- /dev/null +++ b/pkg/appconsts/v3/app_consts.go @@ -0,0 +1,18 @@ +package v3 + +import "time" + +const ( + Version uint64 = 3 + SquareSizeUpperBound int = 128 + SubtreeRootThreshold int = 64 + TxSizeCostPerByte uint64 = 10 + GasPerBlobByte uint32 = 8 + MaxTxSize int = 2097152 // 2 MiB in bytes + TimeoutPropose = time.Millisecond * 3500 + TimeoutCommit = time.Millisecond * 4200 + // UpgradeHeightDelay is the number of blocks after a quorum has been + // reached that the chain should upgrade to the new version. Assuming a block + // interval of 12 seconds, this is 7 days. + UpgradeHeightDelay = int64(7 * 24 * 60 * 60 / 6) // 7 days * 24 hours * 60 minutes * 60 seconds / 6 seconds per block = 100,800 blocks. +) diff --git a/pkg/appconsts/versioned_consts.go b/pkg/appconsts/versioned_consts.go index 67c3c8a8f2..2455e87791 100644 --- a/pkg/appconsts/versioned_consts.go +++ b/pkg/appconsts/versioned_consts.go @@ -1,12 +1,16 @@ package appconsts import ( + "strconv" + "time" + v1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" ) const ( - LatestVersion = v2.Version + LatestVersion = v3.Version ) // SubtreeRootThreshold works as a target upper bound for the number of subtree @@ -18,15 +22,78 @@ const ( // // The rationale for this value is described in more detail in ADR-013. func SubtreeRootThreshold(_ uint64) int { - return v1.SubtreeRootThreshold + return v3.SubtreeRootThreshold } // SquareSizeUpperBound imposes an upper bound on the max effective square size. 
func SquareSizeUpperBound(_ uint64) int { - return v1.SquareSizeUpperBound + if OverrideSquareSizeUpperBoundStr != "" { + parsedValue, err := strconv.Atoi(OverrideSquareSizeUpperBoundStr) + if err != nil { + panic("Invalid OverrideSquareSizeUpperBoundStr value") + } + return parsedValue + } + return v3.SquareSizeUpperBound +} + +func TxSizeCostPerByte(_ uint64) uint64 { + return v3.TxSizeCostPerByte +} + +func GasPerBlobByte(_ uint64) uint32 { + return v3.GasPerBlobByte +} + +func MaxTxSize(_ uint64) int { + return v3.MaxTxSize } var ( DefaultSubtreeRootThreshold = SubtreeRootThreshold(LatestVersion) DefaultSquareSizeUpperBound = SquareSizeUpperBound(LatestVersion) + DefaultTxSizeCostPerByte = TxSizeCostPerByte(LatestVersion) + DefaultGasPerBlobByte = GasPerBlobByte(LatestVersion) ) + +func GetTimeoutPropose(v uint64) time.Duration { + switch v { + case v1.Version: + return v1.TimeoutPropose + case v2.Version: + return v2.TimeoutPropose + default: + return v3.TimeoutPropose + } +} + +func GetTimeoutCommit(v uint64) time.Duration { + switch v { + case v1.Version: + return v1.TimeoutCommit + case v2.Version: + return v2.TimeoutCommit + default: + return v3.TimeoutCommit + } +} + +// UpgradeHeightDelay returns the delay in blocks after a quorum has been reached that the chain should upgrade to the new version. +func UpgradeHeightDelay(v uint64) int64 { + if OverrideUpgradeHeightDelayStr != "" { + parsedValue, err := strconv.ParseInt(OverrideUpgradeHeightDelayStr, 10, 64) + if err != nil { + panic("Invalid OverrideUpgradeHeightDelayStr value") + } + return parsedValue + } + switch v { + case v1.Version: + return v1.UpgradeHeightDelay + case v2.Version: + return v2.UpgradeHeightDelay + default: + return v3.UpgradeHeightDelay + + } +} diff --git a/pkg/appconsts/versioned_consts_test.go b/pkg/appconsts/versioned_consts_test.go index 6fb5cfc48d..f621c0199e 100644 --- a/pkg/appconsts/versioned_consts_test.go +++ b/pkg/appconsts/versioned_consts_test.go @@ -1,7 +1,6 @@ package appconsts_test import ( - "fmt" "testing" "github.com/stretchr/testify/require" @@ -9,52 +8,75 @@ import ( "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" v1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" ) -func TestSubtreeRootThreshold(t *testing.T) { +func TestVersionedConsts(t *testing.T) { testCases := []struct { - version uint64 - expected int + name string + version uint64 + expectedConstant interface{} + got interface{} }{ { - version: v1.Version, - expected: v1.SubtreeRootThreshold, + name: "SubtreeRootThreshold v1", + version: v1.Version, + expectedConstant: v1.SubtreeRootThreshold, + got: appconsts.SubtreeRootThreshold(v1.Version), }, { - version: v2.Version, - expected: v2.SubtreeRootThreshold, + name: "SubtreeRootThreshold v2", + version: v2.Version, + expectedConstant: v2.SubtreeRootThreshold, + got: appconsts.SubtreeRootThreshold(v2.Version), + }, + { + name: "SubtreeRootThreshold v3", + version: v3.Version, + expectedConstant: v3.SubtreeRootThreshold, + got: appconsts.SubtreeRootThreshold(v3.Version), + }, + { + name: "SquareSizeUpperBound v1", + version: v1.Version, + expectedConstant: v1.SquareSizeUpperBound, + got: appconsts.SquareSizeUpperBound(v1.Version), + }, + { + name: "SquareSizeUpperBound v2", + version: v2.Version, + expectedConstant: v2.SquareSizeUpperBound, + got: appconsts.SquareSizeUpperBound(v2.Version), + }, + { + name: "SquareSizeUpperBound v3", + 
version: v3.Version, + expectedConstant: v3.SquareSizeUpperBound, + got: appconsts.SquareSizeUpperBound(v3.Version), + }, + { + name: "TxSizeCostPerByte v3", + version: v3.Version, + expectedConstant: v3.TxSizeCostPerByte, + got: appconsts.TxSizeCostPerByte(v3.Version), }, - } - - for _, tc := range testCases { - name := fmt.Sprintf("version %v", tc.version) - t.Run(name, func(t *testing.T) { - got := appconsts.SubtreeRootThreshold(tc.version) - require.Equal(t, tc.expected, got) - }) - } -} - -func TestSquareSizeUpperBound(t *testing.T) { - testCases := []struct { - version uint64 - expected int - }{ { - version: v1.Version, - expected: v1.SquareSizeUpperBound, + name: "GasPerBlobByte v3", + version: v3.Version, + expectedConstant: v3.GasPerBlobByte, + got: appconsts.GasPerBlobByte(v3.Version), }, { - version: v2.Version, - expected: v2.SquareSizeUpperBound, + name: "MaxTxSize v3", + version: v3.Version, + expectedConstant: v3.MaxTxSize, + got: appconsts.MaxTxSize(v3.Version), }, } for _, tc := range testCases { - name := fmt.Sprintf("version %v", tc.version) - t.Run(name, func(t *testing.T) { - got := appconsts.SquareSizeUpperBound(tc.version) - require.Equal(t, tc.expected, got) + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expectedConstant, tc.got) }) } } diff --git a/pkg/da/data_availability_header_test.go b/pkg/da/data_availability_header_test.go index fa73aeaa5e..7649750d09 100644 --- a/pkg/da/data_availability_header_test.go +++ b/pkg/da/data_availability_header_test.go @@ -88,7 +88,6 @@ func TestExtendShares(t *testing.T) { } for _, tt := range tests { - tt := tt _, err := ExtendShares(tt.shares) if tt.expectedErr { require.NotNil(t, err) @@ -122,7 +121,6 @@ func TestDataAvailabilityHeaderProtoConversion(t *testing.T) { } for _, tt := range tests { - tt := tt pdah, err := tt.dah.ToProto() require.NoError(t, err) resDah, err := DataAvailabilityHeaderFromProto(pdah) @@ -203,7 +201,6 @@ func Test_DAHValidateBasic(t *testing.T) { } for _, tt := range tests { - tt := tt err := tt.dah.ValidateBasic() if tt.expectErr { require.True(t, strings.Contains(err.Error(), tt.errStr), tt.name) diff --git a/pkg/user/account.go b/pkg/user/account.go index 8ab1d031f4..9d66dc0540 100644 --- a/pkg/user/account.go +++ b/pkg/user/account.go @@ -40,6 +40,10 @@ func (a Account) PubKey() cryptotypes.PubKey { return a.pubKey } +func (a Account) AccountNumber() uint64 { + return a.accountNumber +} + // Sequence returns the sequence number of the account. // This is locally tracked func (a Account) Sequence() uint64 { diff --git a/pkg/user/pruning_test.go b/pkg/user/pruning_test.go new file mode 100644 index 0000000000..6019325b19 --- /dev/null +++ b/pkg/user/pruning_test.go @@ -0,0 +1,50 @@ +package user + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPruningInTxTracker(t *testing.T) { + txClient := &TxClient{ + txTracker: make(map[string]txInfo), + } + numTransactions := 10 + + // Add 10 transactions to the tracker that are 10 and 5 minutes old + var txsToBePruned int + var txsNotReadyToBePruned int + for i := 0; i < numTransactions; i++ { + // 5 transactions will be pruned + if i%2 == 0 { + txClient.txTracker["tx"+fmt.Sprint(i)] = txInfo{ + signer: "signer" + fmt.Sprint(i), + sequence: uint64(i), + timestamp: time.Now(). + Add(-10 * time.Minute), + } + txsToBePruned++ + } else { + txClient.txTracker["tx"+fmt.Sprint(i)] = txInfo{ + signer: "signer" + fmt.Sprint(i), + sequence: uint64(i), + timestamp: time.Now(). 
+ Add(-5 * time.Minute), + } + txsNotReadyToBePruned++ + } + } + + txTrackerBeforePruning := len(txClient.txTracker) + + // All transactions were indexed + require.Equal(t, numTransactions, len(txClient.txTracker)) + txClient.pruneTxTracker() + // Prunes the transactions that are 10 minutes old + // 5 transactions will be pruned + require.Equal(t, txsToBePruned, txTrackerBeforePruning-txsToBePruned) + require.Equal(t, len(txClient.txTracker), txsNotReadyToBePruned) +} diff --git a/pkg/user/tx_client.go b/pkg/user/tx_client.go index 752a37a74a..e8fa1cd408 100644 --- a/pkg/user/tx_client.go +++ b/pkg/user/tx_client.go @@ -12,7 +12,6 @@ import ( "time" "github.com/celestiaorg/go-square/v2/share" - blobtx "github.com/celestiaorg/go-square/v2/tx" "github.com/cosmos/cosmos-sdk/client" nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node" "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" @@ -26,7 +25,6 @@ import ( "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" - apperrors "github.com/celestiaorg/celestia-app/v3/app/errors" "github.com/celestiaorg/celestia-app/v3/app/grpc/tx" "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/x/blob/types" @@ -34,12 +32,21 @@ import ( ) const ( - DefaultPollTime = 3 * time.Second - DefaultGasMultiplier float64 = 1.1 + DefaultPollTime = 3 * time.Second + DefaultGasMultiplier float64 = 1.1 + txTrackerPruningInterval = 10 * time.Minute ) type Option func(client *TxClient) +// txInfo is a struct that holds the sequence and the signer of a transaction +// in the local tx pool. +type txInfo struct { + sequence uint64 + signer string + timestamp time.Time +} + // TxResponse is a response from the chain after // a transaction has been submitted. type TxResponse struct { @@ -136,6 +143,9 @@ type TxClient struct { defaultGasPrice float64 defaultAccount string defaultAddress sdktypes.AccAddress + // txTracker maps the tx hash to the Sequence and signer of the transaction + // that was submitted to the chain + txTracker map[string]txInfo } // NewTxClient returns a new signer using the provided keyring @@ -168,6 +178,7 @@ func NewTxClient( defaultGasPrice: appconsts.DefaultMinGasPrice, defaultAccount: records[0].Name, defaultAddress: addr, + txTracker: make(map[string]txInfo), } for _, opt := range options { @@ -301,6 +312,12 @@ func (client *TxClient) SubmitTx(ctx context.Context, msgs []sdktypes.Msg, opts func (client *TxClient) BroadcastTx(ctx context.Context, msgs []sdktypes.Msg, opts ...TxOption) (*sdktypes.TxResponse, error) { client.mtx.Lock() defer client.mtx.Unlock() + + // prune transactions that are older than 10 minutes + // pruning has to be done in broadcast, since users + // might not always call ConfirmTx(). 
+ client.pruneTxTracker() + account, err := client.getAccountNameFromMsgs(msgs) if err != nil { return nil, err @@ -367,23 +384,20 @@ func (client *TxClient) broadcastTx(ctx context.Context, txBytes []byte, signer return nil, err } if resp.TxResponse.Code != abci.CodeTypeOK { - if apperrors.IsNonceMismatchCode(resp.TxResponse.Code) { - // query the account to update the sequence number on-chain for the account - _, seqNum, err := QueryAccount(ctx, client.grpc, client.registry, client.signer.accounts[signer].address) - if err != nil { - return nil, fmt.Errorf("querying account for new sequence number: %w\noriginal tx response: %s", err, resp.TxResponse.RawLog) - } - if err := client.signer.SetSequence(signer, seqNum); err != nil { - return nil, fmt.Errorf("setting sequence: %w", err) - } - return client.retryBroadcastingTx(ctx, txBytes) - } broadcastTxErr := &BroadcastTxError{ TxHash: resp.TxResponse.TxHash, Code: resp.TxResponse.Code, ErrorLog: resp.TxResponse.RawLog, } - return resp.TxResponse, broadcastTxErr + return nil, broadcastTxErr + } + + // save the sequence and signer of the transaction in the local txTracker + // before the sequence is incremented + client.txTracker[resp.TxResponse.TxHash] = txInfo{ + sequence: client.signer.accounts[signer].Sequence(), + signer: signer, + timestamp: time.Now(), } // after the transaction has been submitted, we can increment the @@ -394,62 +408,13 @@ func (client *TxClient) broadcastTx(ctx context.Context, txBytes []byte, signer return resp.TxResponse, nil } -// retryBroadcastingTx creates a new transaction by copying over an existing transaction but creates a new signature with the -// new sequence number. It then calls `broadcastTx` and attempts to submit the transaction -func (client *TxClient) retryBroadcastingTx(ctx context.Context, txBytes []byte) (*sdktypes.TxResponse, error) { - blobTx, isBlobTx, err := blobtx.UnmarshalBlobTx(txBytes) - if isBlobTx { - // only check the error if the bytes are supposed to be of type blob tx - if err != nil { - return nil, err +// pruneTxTracker removes transactions from the local tx tracker that are older than 10 minutes +func (client *TxClient) pruneTxTracker() { + for hash, txInfo := range client.txTracker { + if time.Since(txInfo.timestamp) >= txTrackerPruningInterval { + delete(client.txTracker, hash) } - txBytes = blobTx.Tx - } - tx, err := client.signer.DecodeTx(txBytes) - if err != nil { - return nil, err - } - - opts := make([]TxOption, 0) - if granter := tx.FeeGranter(); granter != nil { - opts = append(opts, SetFeeGranter(granter)) - } - if payer := tx.FeePayer(); payer != nil { - opts = append(opts, SetFeePayer(payer)) - } - if memo := tx.GetMemo(); memo != "" { - opts = append(opts, SetMemo(memo)) - } - if fee := tx.GetFee(); fee != nil { - opts = append(opts, SetFee(fee.AmountOf(appconsts.BondDenom).Uint64())) - } - if gas := tx.GetGas(); gas > 0 { - opts = append(opts, SetGasLimit(gas)) - } - - txBuilder, err := client.signer.txBuilder(tx.GetMsgs(), opts...) - if err != nil { - return nil, err - } - signer, _, err := client.signer.signTransaction(txBuilder) - if err != nil { - return nil, fmt.Errorf("resigning transaction: %w", err) } - - newTxBytes, err := client.signer.EncodeTx(txBuilder.GetTx()) - if err != nil { - return nil, err - } - - // rewrap the blob tx if it was originally a blob tx - if isBlobTx { - newTxBytes, err = blobtx.MarshalBlobTx(newTxBytes, blobTx.Blobs...) 
- if err != nil { - return nil, err - } - } - - return client.broadcastTx(ctx, newTxBytes, signer) } // ConfirmTx periodically pings the provided node for the commitment of a transaction by its @@ -491,16 +456,45 @@ func (client *TxClient) ConfirmTx(ctx context.Context, txHash string) (*TxRespon } return nil, executionErr } + client.deleteFromTxTracker(txHash) return txResponse, nil case "EVICTED": - return nil, fmt.Errorf("tx was evicted from the mempool") + return nil, client.handleEvictions(txHash) default: + client.deleteFromTxTracker(txHash) return nil, fmt.Errorf("unknown tx: %s", txHash) } } } } +// handleEvictions handles the scenario where a transaction is evicted from the mempool. +// It removes the evicted transaction from the local tx tracker without incrementing +// the signer's sequence. +func (client *TxClient) handleEvictions(txHash string) error { + client.mtx.Lock() + defer client.mtx.Unlock() + // Get transaction from the local tx tracker + txInfo, exists := client.txTracker[txHash] + if !exists { + return fmt.Errorf("tx: %s not found in tx client txTracker; likely failed during broadcast", txHash) + } + // The sequence should be rolled back to the sequence of the transaction that was evicted to be + // ready for resubmission. All transactions with a later nonce will be kicked by the nodes tx pool. + if err := client.signer.SetSequence(txInfo.signer, txInfo.sequence); err != nil { + return fmt.Errorf("setting sequence: %w", err) + } + delete(client.txTracker, txHash) + return fmt.Errorf("tx was evicted from the mempool") +} + +// deleteFromTxTracker safely deletes a transaction from the local tx tracker. +func (client *TxClient) deleteFromTxTracker(txHash string) { + client.mtx.Lock() + defer client.mtx.Unlock() + delete(client.txTracker, txHash) +} + // EstimateGas simulates the transaction, calculating the amount of gas that was consumed during execution. 
The final // result will be multiplied by gasMultiplier(that is set in TxClient) func (client *TxClient) EstimateGas(ctx context.Context, msgs []sdktypes.Msg, opts ...TxOption) (uint64, error) { @@ -575,6 +569,7 @@ func (client *TxClient) checkAccountLoaded(ctx context.Context, account string) if err != nil { return fmt.Errorf("retrieving address from keyring: %w", err) } + // FIXME: have a less trusting way of getting the account number and sequence accNum, sequence, err := QueryAccount(ctx, client.grpc, client.registry, addr) if err != nil { return fmt.Errorf("querying account %s: %w", account, err) @@ -603,6 +598,14 @@ func (client *TxClient) getAccountNameFromMsgs(msgs []sdktypes.Msg) (string, err return record.Name, nil } +// GetTxFromTxTracker gets transaction info from the tx client's local tx tracker by its hash +func (client *TxClient) GetTxFromTxTracker(hash string) (sequence uint64, signer string, exists bool) { + client.mtx.Lock() + defer client.mtx.Unlock() + txInfo, exists := client.txTracker[hash] + return txInfo.sequence, txInfo.signer, exists +} + // Signer exposes the tx clients underlying signer func (client *TxClient) Signer() *Signer { return client.signer diff --git a/pkg/user/tx_client_test.go b/pkg/user/tx_client_test.go index 1588c3a060..6ae0c2efa2 100644 --- a/pkg/user/tx_client_test.go +++ b/pkg/user/tx_client_test.go @@ -5,21 +5,21 @@ import ( "testing" "time" + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v3/pkg/user" + "github.com/celestiaorg/celestia-app/v3/test/util/blobfactory" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" sdk "github.com/cosmos/cosmos-sdk/types" sdktx "github.com/cosmos/cosmos-sdk/types/tx" + "github.com/cosmos/cosmos-sdk/x/authz" bank "github.com/cosmos/cosmos-sdk/x/bank/types" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/rand" - - "github.com/celestiaorg/celestia-app/v3/app" - "github.com/celestiaorg/celestia-app/v3/app/encoding" - "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" - "github.com/celestiaorg/celestia-app/v3/pkg/user" - "github.com/celestiaorg/celestia-app/v3/test/util/blobfactory" - "github.com/celestiaorg/celestia-app/v3/test/util/testnode" ) func TestTxClientTestSuite(t *testing.T) { @@ -39,15 +39,7 @@ type TxClientTestSuite struct { } func (suite *TxClientTestSuite) SetupSuite() { - suite.encCfg = encoding.MakeConfig(app.ModuleEncodingRegisters...) - config := testnode.DefaultConfig(). - WithFundedAccounts("a", "b", "c"). 
- WithAppCreator(testnode.CustomAppCreator("0utia")) - suite.ctx, _, _ = testnode.NewNetwork(suite.T(), config) - _, err := suite.ctx.WaitForHeight(1) - suite.Require().NoError(err) - suite.txClient, err = user.SetupTxClient(suite.ctx.GoContext(), suite.ctx.Keyring, suite.ctx.GRPCClient, suite.encCfg, user.WithGasMultiplier(1.2)) - suite.Require().NoError(err) + suite.encCfg, suite.txClient, suite.ctx = setupTxClient(suite.T(), testnode.DefaultTendermintConfig().Mempool.TTLDuration) suite.serviceClient = sdktx.NewServiceClient(suite.ctx.GRPCClient) } @@ -162,9 +154,11 @@ func (suite *TxClientTestSuite) TestConfirmTx() { ctx, cancel := context.WithTimeout(suite.ctx.GoContext(), time.Second) defer cancel() + seqBeforeBroadcast := suite.txClient.Signer().Account(suite.txClient.DefaultAccountName()).Sequence() msg := bank.NewMsgSend(suite.txClient.DefaultAddress(), testnode.RandomAddress().(sdk.AccAddress), sdk.NewCoins(sdk.NewInt64Coin(app.BondDenom, 10))) resp, err := suite.txClient.BroadcastTx(ctx, []sdk.Msg{msg}) require.NoError(t, err) + assertTxInTxTracker(t, suite.txClient, resp.TxHash, suite.txClient.DefaultAccountName(), seqBeforeBroadcast) _, err = suite.txClient.ConfirmTx(ctx, resp.TxHash) require.Error(t, err) @@ -174,48 +168,90 @@ func (suite *TxClientTestSuite) TestConfirmTx() { t.Run("should error when tx is not found", func(t *testing.T) { ctx, cancel := context.WithTimeout(suite.ctx.GoContext(), 5*time.Second) defer cancel() - _, err := suite.txClient.ConfirmTx(ctx, "E32BD15CAF57AF15D17B0D63CF4E63A9835DD1CEBB059C335C79586BC3013728") - require.Contains(t, err.Error(), "unknown tx: E32BD15CAF57AF15D17B0D63CF4E63A9835DD1CEBB059C335C79586BC3013728") + resp, err := suite.txClient.ConfirmTx(ctx, "E32BD15CAF57AF15D17B0D63CF4E63A9835DD1CEBB059C335C79586BC3013728") + require.Contains(t, err.Error(), "transaction with hash E32BD15CAF57AF15D17B0D63CF4E63A9835DD1CEBB059C335C79586BC3013728 not found; it was likely rejected") + require.Nil(t, resp) }) t.Run("should return error log when execution fails", func(t *testing.T) { + seqBeforeBroadcast := suite.txClient.Signer().Account(suite.txClient.DefaultAccountName()).Sequence() innerMsg := bank.NewMsgSend(testnode.RandomAddress().(sdk.AccAddress), testnode.RandomAddress().(sdk.AccAddress), sdk.NewCoins(sdk.NewInt64Coin(app.BondDenom, 10))) msg := authz.NewMsgExec(suite.txClient.DefaultAddress(), []sdk.Msg{innerMsg}) resp, err := suite.txClient.BroadcastTx(suite.ctx.GoContext(), []sdk.Msg{&msg}, fee, gas) require.NoError(t, err) - _, err = suite.txClient.ConfirmTx(suite.ctx.GoContext(), resp.TxHash) + assertTxInTxTracker(t, suite.txClient, resp.TxHash, suite.txClient.DefaultAccountName(), seqBeforeBroadcast) + + confirmTxResp, err := suite.txClient.ConfirmTx(suite.ctx.GoContext(), resp.TxHash) require.Error(t, err) require.Contains(t, err.Error(), "authorization not found") + require.Nil(t, confirmTxResp) + require.True(t, wasRemovedFromTxTracker(resp.TxHash, suite.txClient)) }) t.Run("should success when tx is found immediately", func(t *testing.T) { addr := suite.txClient.DefaultAddress() + seqBeforeBroadcast := suite.txClient.Signer().Account(suite.txClient.DefaultAccountName()).Sequence() msg := bank.NewMsgSend(addr, testnode.RandomAddress().(sdk.AccAddress), sdk.NewCoins(sdk.NewInt64Coin(app.BondDenom, 10))) resp, err := suite.txClient.BroadcastTx(suite.ctx.GoContext(), []sdk.Msg{msg}, fee, gas) require.NoError(t, err) - require.NotNil(t, resp) + require.Equal(t, resp.Code, abci.CodeTypeOK) + assertTxInTxTracker(t, suite.txClient, 
resp.TxHash, suite.txClient.DefaultAccountName(), seqBeforeBroadcast) + ctx, cancel := context.WithTimeout(suite.ctx.GoContext(), 30*time.Second) defer cancel() confirmTxResp, err := suite.txClient.ConfirmTx(ctx, resp.TxHash) require.NoError(t, err) require.Equal(t, abci.CodeTypeOK, confirmTxResp.Code) + require.True(t, wasRemovedFromTxTracker(resp.TxHash, suite.txClient)) }) t.Run("should error when tx is found with a non-zero error code", func(t *testing.T) { balance := suite.queryCurrentBalance(t) addr := suite.txClient.DefaultAddress() + seqBeforeBroadcast := suite.txClient.Signer().Account(suite.txClient.DefaultAccountName()).Sequence() // Create a msg send with out of balance, ensure this tx fails msg := bank.NewMsgSend(addr, testnode.RandomAddress().(sdk.AccAddress), sdk.NewCoins(sdk.NewInt64Coin(app.BondDenom, 1+balance))) resp, err := suite.txClient.BroadcastTx(suite.ctx.GoContext(), []sdk.Msg{msg}, fee, gas) require.NoError(t, err) - require.NotNil(t, resp) - _, err = suite.txClient.ConfirmTx(suite.ctx.GoContext(), resp.TxHash) + require.Equal(t, resp.Code, abci.CodeTypeOK) + assertTxInTxTracker(t, suite.txClient, resp.TxHash, suite.txClient.DefaultAccountName(), seqBeforeBroadcast) + + confirmTxResp, err := suite.txClient.ConfirmTx(suite.ctx.GoContext(), resp.TxHash) require.Error(t, err) + require.Nil(t, confirmTxResp) code := err.(*user.ExecutionError).Code require.NotEqual(t, abci.CodeTypeOK, code) + require.True(t, wasRemovedFromTxTracker(resp.TxHash, suite.txClient)) }) } +func TestEvictions(t *testing.T) { + _, txClient, ctx := setupTxClient(t, 1*time.Nanosecond) + + fee := user.SetFee(1e6) + gas := user.SetGasLimit(1e6) + + // Keep submitting the transaction until we get the eviction error + sender := txClient.Signer().Account(txClient.DefaultAccountName()) + msg := bank.NewMsgSend(sender.Address(), testnode.RandomAddress().(sdk.AccAddress), sdk.NewCoins(sdk.NewInt64Coin(app.BondDenom, 10))) + var seqBeforeEviction uint64 + // Loop five times until the tx is evicted + for i := 0; i < 5; i++ { + seqBeforeEviction = sender.Sequence() + resp, err := txClient.BroadcastTx(ctx.GoContext(), []sdk.Msg{msg}, fee, gas) + require.NoError(t, err) + _, err = txClient.ConfirmTx(ctx.GoContext(), resp.TxHash) + if err != nil { + if err.Error() == "tx was evicted from the mempool" { + break + } + } + } + + seqAfterEviction := sender.Sequence() + require.Equal(t, seqBeforeEviction, seqAfterEviction) +} + func (suite *TxClientTestSuite) TestGasEstimation() { addr := suite.txClient.DefaultAddress() msg := bank.NewMsgSend(addr, testnode.RandomAddress().(sdk.AccAddress), sdk.NewCoins(sdk.NewInt64Coin(app.BondDenom, 10))) @@ -281,3 +317,36 @@ func (suite *TxClientTestSuite) queryCurrentBalance(t *testing.T) int64 { require.NoError(t, err) return balanceResp.Balances.AmountOf(app.BondDenom).Int64() } + +func wasRemovedFromTxTracker(txHash string, txClient *user.TxClient) bool { + seq, signer, exists := txClient.GetTxFromTxTracker(txHash) + return !exists && seq == 0 && signer == "" +} + +// asserts that a tx was indexed in the tx tracker and that the sequence does not increase +func assertTxInTxTracker(t *testing.T, txClient *user.TxClient, txHash string, expectedSigner string, seqBeforeBroadcast uint64) { + seqFromTxTracker, signer, exists := txClient.GetTxFromTxTracker(txHash) + require.True(t, exists) + require.Equal(t, expectedSigner, signer) + seqAfterBroadcast := txClient.Signer().Account(expectedSigner).Sequence() + // TxInfo is indexed before the nonce is increased + require.Equal(t, 
seqBeforeBroadcast, seqFromTxTracker) + // Successfully broadcast transaction increases the sequence + require.Equal(t, seqAfterBroadcast, seqBeforeBroadcast+1) +} + +func setupTxClient(t *testing.T, ttlDuration time.Duration) (encoding.Config, *user.TxClient, testnode.Context) { + encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) + defaultTmConfig := testnode.DefaultTendermintConfig() + defaultTmConfig.Mempool.TTLDuration = ttlDuration + testnodeConfig := testnode.DefaultConfig(). + WithTendermintConfig(defaultTmConfig). + WithFundedAccounts("a", "b", "c"). + WithAppCreator(testnode.CustomAppCreator("0utia")) + ctx, _, _ := testnode.NewNetwork(t, testnodeConfig) + _, err := ctx.WaitForHeight(1) + require.NoError(t, err) + txClient, err := user.SetupTxClient(ctx.GoContext(), ctx.Keyring, ctx.GRPCClient, encCfg, user.WithGasMultiplier(1.2)) + require.NoError(t, err) + return encCfg, txClient, ctx +} diff --git a/pkg/wrapper/README.md b/pkg/wrapper/README.md index e58188bd7a..cacbd55dda 100644 --- a/pkg/wrapper/README.md +++ b/pkg/wrapper/README.md @@ -76,9 +76,9 @@ One namespace ID is located in the first `NamespaceIDSize` bytes, while the othe ## References - Namespaced Merkle tree specifications: -- Celestia original data square specification: -- Celestia constants: -- Celestia reserved namespace IDs: +- Celestia original data square specification: +- Celestia constants: +- Celestia reserved namespace IDs: [nmtlink]: https://github.com/celestiaorg/nmt/blob/master/docs/spec/nmt.md [nmtwrapper-link]: https://github.com/celestiaorg/celestia-app/blob/main/pkg/wrapper/nmt_wrapper.go diff --git a/scripts/arabica-block-sync.sh b/scripts/arabica-block-sync.sh new file mode 100755 index 0000000000..1b911ccdfb --- /dev/null +++ b/scripts/arabica-block-sync.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# This script starts a consensus node on Arabica and block syncs from genesis to +# the tip of the chain. + +# Stop script execution if an error is encountered +set -o errexit +# Stop script execution if an undefined variable is used +set -o nounset + +CHAIN_ID="arabica-11" +NODE_NAME="node-name" +SEEDS="827583022cc6ce65cf762115642258f937c954cd@validator-1.celestia-arabica-11.com:26656,74e42b39f512f844492ff09e30af23d54579b7bc@validator-2.celestia-arabica-11.com:26656,00d577159b2eb1f524ef9c37cb389c020a2c38d2@validator-3.celestia-arabica-11.com:26656,b2871b6dc2e18916d07264af0e87c456c2bba04f@validator-4.celestia-arabica-11.com:26656" + +CELESTIA_APP_HOME="${HOME}/.celestia-app" +CELESTIA_APP_VERSION=$(celestia-appd version 2>&1) + +echo "celestia-app home: ${CELESTIA_APP_HOME}" +echo "celestia-app version: ${CELESTIA_APP_VERSION}" +echo "" + +# Ask the user for confirmation before deleting the existing celestia-app home +# directory. +read -p "Are you sure you want to delete: $CELESTIA_APP_HOME? [y/n] " response + +# Check the user's response +if [ "$response" != "y" ]; then + # Exit if the user did not respond with "y" + echo "You must delete $CELESTIA_APP_HOME to continue." + exit 1 +fi + +echo "Deleting $CELESTIA_APP_HOME..." +rm -r "$CELESTIA_APP_HOME" + +echo "Initializing config files..." +celestia-appd init ${NODE_NAME} --chain-id ${CHAIN_ID} > /dev/null 2>&1 # Hide output to reduce terminal noise + +echo "Settings seeds in config.toml..." +sed -i.bak -e "s/^seeds *=.*/seeds = \"$SEEDS\"/" $CELESTIA_APP_HOME/config/config.toml + +echo "Downloading genesis file..." +celestia-appd download-genesis ${CHAIN_ID} + +echo "Starting celestia-appd..." 
+celestia-appd start --v2-upgrade-height 1751707 --force-no-bbr diff --git a/scripts/arabica.sh b/scripts/arabica.sh index fe88e26c89..d09b9d8441 100755 --- a/scripts/arabica.sh +++ b/scripts/arabica.sh @@ -57,4 +57,4 @@ echo "Downloading genesis file..." celestia-appd download-genesis ${CHAIN_ID} echo "Starting celestia-appd..." -celestia-appd start --v2-upgrade-height 1751707 +celestia-appd start --v2-upgrade-height 1751707 --force-no-bbr diff --git a/scripts/mainnet.sh b/scripts/mainnet.sh index 317fe5dbdc..aa92e13326 100755 --- a/scripts/mainnet.sh +++ b/scripts/mainnet.sh @@ -1,5 +1,8 @@ #!/bin/sh +# This script starts a consensus node on Mainnet Beta and state syncs to the tip +# of the chain. + # Stop script execution if an error is encountered set -o errexit # Stop script execution if an undefined variable is used @@ -10,6 +13,7 @@ NODE_NAME="node-name" SEEDS="e6116822e1a5e283d8a85d3ec38f4d232274eaf3@consensus-full-seed-1.celestia-bootstrap.net:26656,cf7ac8b19ff56a9d47c75551bd4864883d1e24b5@consensus-full-seed-2.celestia-bootstrap.net:26656" CELESTIA_APP_HOME="${HOME}/.celestia-app" CELESTIA_APP_VERSION=$(celestia-appd version 2>&1) +RPC="https://celestia-rpc.polkachu.com:443" echo "celestia-app home: ${CELESTIA_APP_HOME}" echo "celestia-app version: ${CELESTIA_APP_VERSION}" @@ -35,10 +39,20 @@ celestia-appd init ${NODE_NAME} --chain-id ${CHAIN_ID} > /dev/null 2>&1 # Hide o echo "Settings seeds in config.toml..." sed -i.bak -e "s/^seeds *=.*/seeds = \"$SEEDS\"/" $CELESTIA_APP_HOME/config/config.toml +LATEST_HEIGHT=$(curl -s $RPC/block | jq -r .result.block.header.height); +BLOCK_HEIGHT=$((LATEST_HEIGHT - 2000)); \ +TRUST_HASH=$(curl -s "$RPC/block?height=$BLOCK_HEIGHT" | jq -r .result.block_id.hash) + +echo "Block height: $BLOCK_HEIGHT" +echo "Trust hash: $TRUST_HASH" +echo "Enabling state sync in config.toml..." +sed -i.bak -E "s|^(enable[[:space:]]+=[[:space:]]+).*$|\1true| ; \ +s|^(rpc_servers[[:space:]]+=[[:space:]]+).*$|\1\"$RPC,$RPC\"| ; \ +s|^(trust_height[[:space:]]+=[[:space:]]+).*$|\1$BLOCK_HEIGHT| ; \ +s|^(trust_hash[[:space:]]+=[[:space:]]+).*$|\1\"$TRUST_HASH\"|" $HOME/.celestia-app/config/config.toml + echo "Downloading genesis file..." celestia-appd download-genesis ${CHAIN_ID} > /dev/null 2>&1 # Hide output to reduce terminal noise -echo "Starting celestia-appd in the background and piping logs to mainnet.log" -nohup celestia-appd start > "${HOME}/mainnet.log" 2>&1 & - -echo "You can check the node's status via: celestia-appd status" +echo "Starting celestia-appd..." +celestia-appd start --v2-upgrade-height 2371495 diff --git a/scripts/mocha-block-sync.sh b/scripts/mocha-block-sync.sh new file mode 100755 index 0000000000..3f8795dd6e --- /dev/null +++ b/scripts/mocha-block-sync.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# This script starts a consensus node on Mocha and block syncs from genesis to +# the tip of the chain. This is expected to take a few weeks. 
+ +# Stop script execution if an error is encountered +set -o errexit +# Stop script execution if an undefined variable is used +set -o nounset + +CHAIN_ID="mocha-4" +NODE_NAME="node-name" +SEEDS="ee9f90974f85c59d3861fc7f7edb10894f6ac3c8@seed-mocha.pops.one:26656,258f523c96efde50d5fe0a9faeea8a3e83be22ca@seed.mocha-4.celestia.aviaone.com:20279,5d0bf034d6e6a8b5ee31a2f42f753f1107b3a00e@celestia-testnet-seed.itrocket.net:11656,7da0fb48d6ef0823bc9770c0c8068dd7c89ed4ee@celest-test-seed.theamsolutions.info:443" + +CELESTIA_APP_HOME="${HOME}/.celestia-app" +CELESTIA_APP_VERSION=$(celestia-appd version 2>&1) + +echo "celestia-app home: ${CELESTIA_APP_HOME}" +echo "celestia-app version: ${CELESTIA_APP_VERSION}" +echo "" + +# Ask the user for confirmation before deleting the existing celestia-app home +# directory. +read -p "Are you sure you want to delete: $CELESTIA_APP_HOME? [y/n] " response + +# Check the user's response +if [ "$response" != "y" ]; then + # Exit if the user did not respond with "y" + echo "You must delete $CELESTIA_APP_HOME to continue." + exit 1 +fi + +echo "Deleting $CELESTIA_APP_HOME..." +rm -r "$CELESTIA_APP_HOME" + +echo "Initializing config files..." +celestia-appd init ${NODE_NAME} --chain-id ${CHAIN_ID} > /dev/null 2>&1 # Hide output to reduce terminal noise + +echo "Settings seeds in config.toml..." +sed -i.bak -e "s/^seeds *=.*/seeds = \"$SEEDS\"/" $CELESTIA_APP_HOME/config/config.toml + +echo "Downloading genesis file..." +celestia-appd download-genesis ${CHAIN_ID} > /dev/null 2>&1 # Hide output to reduce terminal noise + +echo "Starting celestia-appd..." +celestia-appd start --v2-upgrade-height 2585031 --force-no-bbr diff --git a/scripts/mocha.sh b/scripts/mocha.sh index 5447eb9e3b..56c0e89480 100755 --- a/scripts/mocha.sh +++ b/scripts/mocha.sh @@ -57,4 +57,4 @@ echo "Downloading genesis file..." celestia-appd download-genesis ${CHAIN_ID} > /dev/null 2>&1 # Hide output to reduce terminal noise echo "Starting celestia-appd..." -celestia-appd start --v2-upgrade-height 2585031 +celestia-appd start --v2-upgrade-height 2585031 --force-no-bbr diff --git a/scripts/single-node.sh b/scripts/single-node.sh index d7d19b4f07..b6a6e6486a 100755 --- a/scripts/single-node.sh +++ b/scripts/single-node.sh @@ -1,5 +1,7 @@ #!/bin/sh +# This script starts a single node testnet. + # Stop script execution if an error is encountered set -o errexit # Stop script execution if an undefined variable is used @@ -60,7 +62,7 @@ createGenesis() { # If you encounter: `sed: -I or -i may not be used with stdin` on MacOS you can mitigate by installing gnu-sed # https://gist.github.com/andre3k1/e3a1a7133fded5de5a9ee99c87c6fa0d?permalink_comment_id=3082272#gistcomment-3082272 - # Override the default RPC servier listening address + # Override the default RPC server listening address sed -i'.bak' 's#"tcp://127.0.0.1:26657"#"tcp://0.0.0.0:26657"#g' "${CELESTIA_APP_HOME}"/config/config.toml # Enable transaction indexing diff --git a/scripts/state-sync.sh b/scripts/state-sync.sh new file mode 100755 index 0000000000..9d7a8417b0 --- /dev/null +++ b/scripts/state-sync.sh @@ -0,0 +1,108 @@ +#!/bin/sh + +# This script starts a single node and attempts to state sync with a node +# started via ./single-node.sh + +# Stop script execution if an error is encountered +set -o errexit +# Stop script execution if an undefined variable is used +set -o nounset + +if ! [ -x "$(command -v celestia-appd)" ] +then + echo "celestia-appd could not be found. 
Please install the celestia-appd binary using 'make install' and make sure the PATH contains the directory where the binary exists. By default, go will install the binary under '~/go/bin'" + exit 1 +fi + +CHAIN_ID="test" +KEY_NAME="validator" +KEYRING_BACKEND="test" +COINS="1000000000000000utia" +DELEGATION_AMOUNT="5000000000utia" +SINGLE_NODE_HOME="${HOME}/.celestia-app" +CELESTIA_APP_HOME="${HOME}/.celestia-app-state-sync" +CELESTIA_APP_VERSION=$(celestia-appd version 2>&1) +GENESIS_FILE="${CELESTIA_APP_HOME}/config/genesis.json" +FEES="500utia" +RPC="0.0.0.0:26657" + +echo "celestia-app home: ${CELESTIA_APP_HOME}" +echo "celestia-app version: ${CELESTIA_APP_VERSION}" +echo "" + +BLOCK_HEIGHT=$(curl -s $RPC/block | jq -r .result.block.header.height); +TRUST_HASH=$(curl -s "$RPC/block?height=$BLOCK_HEIGHT" | jq -r .result.block_id.hash) + +echo "Block height: $BLOCK_HEIGHT" +echo "Trust hash: $TRUST_HASH" +echo "Enabling state sync in config.toml..." +sed -i.bak -E "s|^(enable[[:space:]]+=[[:space:]]+).*$|\1true| ; \ +s|^(rpc_servers[[:space:]]+=[[:space:]]+).*$|\1\"$RPC,$RPC\"| ; \ +s|^(trust_height[[:space:]]+=[[:space:]]+).*$|\1$BLOCK_HEIGHT| ; \ +s|^(trust_hash[[:space:]]+=[[:space:]]+).*$|\1\"$TRUST_HASH\"|" $CELESTIA_APP_HOME/config/config.toml + +PEER=$(curl -s http://${RPC}/status | jq -r '.result.node_info.id + "@127.0.0.1:26656"') +echo "Setting persistent peer to ${PEER}" + +createGenesis() { + echo "Initializing validator and node config files..." + celestia-appd init ${CHAIN_ID} \ + --chain-id ${CHAIN_ID} \ + --home "${CELESTIA_APP_HOME}" \ + > /dev/null 2>&1 # Hide output to reduce terminal noise + + echo "Adding a new key to the keyring..." + celestia-appd keys add ${KEY_NAME} \ + --keyring-backend=${KEYRING_BACKEND} \ + --home "${CELESTIA_APP_HOME}" \ + > /dev/null 2>&1 # Hide output to reduce terminal noise + + echo "Copying genesis.json from the node started via ./single-node.sh..." + cp ${SINGLE_NODE_HOME}/config/genesis.json ${CELESTIA_APP_HOME}/config/genesis.json + + # If you encounter: `sed: -I or -i may not be used with stdin` on MacOS you can mitigate by installing gnu-sed + # https://gist.github.com/andre3k1/e3a1a7133fded5de5a9ee99c87c6fa0d?permalink_comment_id=3082272#gistcomment-3082272 + + # Override the default RPC server listening address to not conflict with the node started via ./single-node.sh + sed -i'.bak' 's#"tcp://127.0.0.1:26657"#"tcp://0.0.0.0:26000"#g' "${CELESTIA_APP_HOME}"/config/config.toml + + # Override the p2p address to not conflict with the node started via ./single-node.sh + sed -i'.bak' 's#laddr = "tcp://0.0.0.0:26656"#laddr = "tcp://0.0.0.0:36656"#g' "${CELESTIA_APP_HOME}"/config/config.toml + + # Enable transaction indexing + sed -i'.bak' 's#"null"#"kv"#g' "${CELESTIA_APP_HOME}"/config/config.toml + + # Persist ABCI responses + sed -i'.bak' 's#discard_abci_responses = true#discard_abci_responses = false#g' "${CELESTIA_APP_HOME}"/config/config.toml + + # Override the log level to debug + # sed -i'.bak' 's#log_level = "info"#log_level = "debug"#g' "${CELESTIA_APP_HOME}"/config/config.toml +} + +deleteCelestiaAppHome() { + echo "Deleting $CELESTIA_APP_HOME..." + rm -r "$CELESTIA_APP_HOME" +} + +startCelestiaApp() { + echo "Starting celestia-app..." + celestia-appd start \ + --home "${CELESTIA_APP_HOME}" \ + --grpc.enable \ + --grpc.address="0.0.0.0:9999" \ + --p2p.persistent_peers=${PEER} \ + --fast_sync false \ + --v2-upgrade-height 3 +} + +if [ -f $GENESIS_FILE ]; then + echo "Do you want to delete existing ${CELESTIA_APP_HOME}? 
[y/n]" + read -r response + if [ "$response" = "y" ]; then + deleteCelestiaAppHome + createGenesis + fi +else + createGenesis +fi +startCelestiaApp diff --git a/specs/src/SUMMARY.md b/specs/src/SUMMARY.md index f5aa23cec1..e6c4633574 100644 --- a/specs/src/SUMMARY.md +++ b/specs/src/SUMMARY.md @@ -12,6 +12,7 @@ Celestia App Specifications - [AnteHandler](./ante_handler.md) - [AnteHandler v1](./ante_handler_v1.md) - [AnteHandler v2](./ante_handler_v2.md) + - [AnteHandler v3](./ante_handler_v3.md) - [Fraud Proofs](./fraud_proofs.md) - [Networking](./networking.md) - [Public-Key Cryptography](./public_key_cryptography.md) @@ -24,3 +25,4 @@ Celestia App Specifications - [Parameters](./parameters.md) - [Parameters v1](./parameters_v1.md) - [Parameters v2](./parameters_v2.md) + - [Parameters v3](./parameters_v3.md) diff --git a/specs/src/ante_handler.md b/specs/src/ante_handler.md index 2ebe759a79..d002c1e7eb 100644 --- a/specs/src/ante_handler.md +++ b/specs/src/ante_handler.md @@ -11,3 +11,4 @@ The AnteHandler is defined in `app/ante/ante.go`. The app version impacts AnteHa - [AnteHandler v1](./ante_handler_v1.md) - [AnteHandler v2](./ante_handler_v2.md) +- [AnteHandler v3](./ante_handler_v3.md) diff --git a/specs/src/ante_handler_v3.md b/specs/src/ante_handler_v3.md new file mode 100644 index 0000000000..cff67cd117 --- /dev/null +++ b/specs/src/ante_handler_v3.md @@ -0,0 +1,26 @@ +# AnteHandler v3 + +The AnteHandler chains together several decorators to ensure the following criteria are met for app version 3: + +- The tx does not contain any messages that are unsupported by the current app version. See `MsgVersioningGateKeeper`. +- The tx size is not larger than the application's configured versioned constant [MaxTxSize](https://github.com/celestiaorg/celestia-app/blob/8ba82c1b872b7f5686d9bb91b93a0442223d7bb2/pkg/appconsts/v3/app_consts.go#L9). +- The tx does not contain any [extension options](https://github.com/cosmos/cosmos-sdk/blob/22c28366466e64ebf0df1ce5bec8b1130523552c/proto/cosmos/tx/v1beta1/tx.proto#L119-L122). +- The tx passes `ValidateBasic()`. +- The tx's [timeout_height](https://github.com/cosmos/cosmos-sdk/blob/22c28366466e64ebf0df1ce5bec8b1130523552c/proto/cosmos/tx/v1beta1/tx.proto#L115-L117) has not been reached if one is specified. +- The tx's [memo](https://github.com/cosmos/cosmos-sdk/blob/22c28366466e64ebf0df1ce5bec8b1130523552c/proto/cosmos/tx/v1beta1/tx.proto#L110-L113) is <= the max memo characters where [`MaxMemoCharacters = 256`](). +- The tx's [gas_limit](https://github.com/cosmos/cosmos-sdk/blob/22c28366466e64ebf0df1ce5bec8b1130523552c/proto/cosmos/tx/v1beta1/tx.proto#L211-L213) is > the gas consumed based on the tx's size where [`TxSizeCostPerByte = 10`](https://github.com/celestiaorg/celestia-app/blob/32fc6903478ea08eba728ac9cd4ffedf9ef72d98/pkg/appconsts/v3/app_consts.go#L8). +- The tx's feepayer has enough funds to pay fees for the tx. The tx's feepayer is the feegranter (if specified) or the tx's first signer. Note the [feegrant](https://github.com/cosmos/cosmos-sdk/blob/v0.46.15/x/feegrant/README.md) module is enabled. +- The tx's gas price is >= the network minimum gas price where [`NetworkMinGasPrice = 0.000001` utia](https://github.com/celestiaorg/celestia-app/blob/32fc6903478ea08eba728ac9cd4ffedf9ef72d98/pkg/appconsts/initial_consts.go#L33). +- The tx's count of signatures <= the max number of signatures. 
The max number of signatures is [`TxSigLimit = 7`](https://github.com/cosmos/cosmos-sdk/blob/a429238fc267da88a8548bfebe0ba7fb28b82a13/x/auth/README.md?plain=1#L231). +- The tx's [gas_limit](https://github.com/cosmos/cosmos-sdk/blob/22c28366466e64ebf0df1ce5bec8b1130523552c/proto/cosmos/tx/v1beta1/tx.proto#L211-L213) is > the gas consumed based on the tx's signatures. +- The tx's [signatures](https://github.com/cosmos/cosmos-sdk/blob/22c28366466e64ebf0df1ce5bec8b1130523552c/types/tx/signing/signature.go#L10-L26) are valid. For each signature, ensure that the signature's sequence number (a.k.a nonce) matches the account sequence number of the signer. +- The tx's [gas_limit](https://github.com/cosmos/cosmos-sdk/blob/22c28366466e64ebf0df1ce5bec8b1130523552c/proto/cosmos/tx/v1beta1/tx.proto#L211-L213) is > the gas consumed based on the blob size(s). Since blobs are charged based on the number of shares they occupy, the gas consumed is calculated as follows: `gasToConsume = sharesNeeded(blob) * bytesPerShare * gasPerBlobByte`. Where `bytesPerShare` is a global constant (an alias for [`ShareSize = 512`](https://github.com/celestiaorg/celestia-app/blob/c90e61d5a2d0c0bd0e123df4ab416f6f0d141b7f/pkg/appconsts/global_consts.go#L27-L28)) and `gasPerBlobByte` is a versioned constant that can be modified through hard forks (the [`DefaultGasPerBlobByte = 8`](https://github.com/celestiaorg/celestia-app/blob/32fc6903478ea08eba728ac9cd4ffedf9ef72d98/pkg/appconsts/v3/app_consts.go#L8)). +- The tx's total blob share count is <= the max blob share count. The max blob share count is derived from the maximum valid square size. The max valid square size is the minimum of: `GovMaxSquareSize` and `SquareSizeUpperBound`. +- The tx does not contain a message of type [MsgSubmitProposal](https://github.com/cosmos/cosmos-sdk/blob/d6d929843bbd331b885467475bcb3050788e30ca/proto/cosmos/gov/v1/tx.proto#L33-L43) with zero proposal messages. +- The tx is not an IBC packet or update message that has already been processed. + +In addition to the above criteria, the AnteHandler also has a number of side-effects: + +- Tx fees are deducted from the tx's feepayer and added to the fee collector module account. +- Tx priority is calculated based on the smallest denomination of gas price in the tx and set in context. +- The nonce of all tx signers is incremented by 1. diff --git a/specs/src/consensus.md b/specs/src/consensus.md index 69dc17f136..fa708ce95d 100644 --- a/specs/src/consensus.md +++ b/specs/src/consensus.md @@ -32,6 +32,7 @@ | `SHARE_INFO_BYTES` | `uint64` | `1` | `byte` | The number of bytes used for [share](data_structures.md#share) information | | `SHARE_RESERVED_BYTES` | `uint64` | `4` | `byte` | The number of bytes used to store the index of the first transaction in a transaction share. Must be able to represent any integer up to and including `SHARE_SIZE - 1`. | | `SHARE_SIZE` | `uint64` | `512` | `byte` | Size of transaction and blob [shares](data_structures.md#share), in bytes. | +| `SignerSize` | `int` | `20` | `byte` | The number of bytes used to store the signer in a [share](data_structures.md#share). | | `STATE_SUBTREE_RESERVED_BYTES` | `uint64` | `1` | `byte` | Number of bytes reserved to identify state subtrees. | | `UNBONDING_DURATION` | `uint32` | | `block` | Duration, in blocks, for unbonding a validator or delegation. | | `v1.Version` | `uint64` | `1` | | First version of the application. Breaking changes (hard forks) must update this parameter. 
| diff --git a/specs/src/figures/first_share_with_signer.dot b/specs/src/figures/first_share_with_signer.dot new file mode 100644 index 0000000000..d22e7eff1e --- /dev/null +++ b/specs/src/figures/first_share_with_signer.dot @@ -0,0 +1,25 @@ +digraph G { + node [shape = record, penwidth = 0]; + + share [label=< + + + + + + + + + + + + + + + + + + +
0129303454512
namespace versionnamespace idinfo bytesequence lengthsignerblob1
+ >]; +} diff --git a/specs/src/figures/first_share_with_signer.svg b/specs/src/figures/first_share_with_signer.svg new file mode 100644 index 0000000000..71dfe3d8e3 --- /dev/null +++ b/specs/src/figures/first_share_with_signer.svg @@ -0,0 +1,31 @@ + + + +G + + + +share + +0 +1 +29 +30 +34 +54 +512 + +namespace version + +namespace id + +info byte + +sequence length + +signer + +blob1 + + + \ No newline at end of file diff --git a/specs/src/parameters.md b/specs/src/parameters.md index 4dcecce93d..2e32514c9d 100644 --- a/specs/src/parameters.md +++ b/specs/src/parameters.md @@ -4,3 +4,4 @@ The parameters in the application depend on the app version: - [Parameters v1](./parameters_v1.md) - [Parameters v2](./parameters_v2.md) +- [Parameters v3](./parameters_v3.md) diff --git a/specs/src/parameters_v2.md b/specs/src/parameters_v2.md index 7555dd8761..9bd3e63f25 100644 --- a/specs/src/parameters_v2.md +++ b/specs/src/parameters_v2.md @@ -54,6 +54,7 @@ hardcoded in the application or they are blocked by the `x/paramfilter` module. | mint.DisinflationRate | 0.10 (10%) | The rate at which the inflation rate decreases each year. | False | | mint.InitialInflationRate | 0.08 (8%) | The inflation rate the network starts at. | False | | mint.TargetInflationRate | 0.015 (1.5%) | The inflation rate that the network aims to stabilize at. | False | +| packetfowardmiddleware.FeePercentage | 0 | % of the forwarded packet amount which will be subtracted and distributed to the community pool. | True | | slashing.DowntimeJailDuration | 1 min | Duration of time a validator must stay jailed. | True | | slashing.MinSignedPerWindow | 0.75 (75%) | The percentage of SignedBlocksWindow that must be signed not to get jailed. | True | | slashing.SignedBlocksWindow | 5000 | The range of blocks used to count for downtime. | True | diff --git a/specs/src/parameters_v3.md b/specs/src/parameters_v3.md new file mode 100644 index 0000000000..860cdb286f --- /dev/null +++ b/specs/src/parameters_v3.md @@ -0,0 +1,72 @@ +# Parameters v3 + +The parameters below represent the parameters for app version 3. + +Note that not all of these parameters are changeable via governance. This list +also includes parameter that require a hardfork to change due to being manually +hardcoded in the application or they are blocked by the `x/paramfilter` module. + +## Global parameters + +| Parameter | Default | Summary | Changeable via Governance | +|-------------------|---------|------------------------------------------------------------------------------------------------------------------------|---------------------------| +| MaxBlockSizeBytes | 100MiB | Hardcoded value in CometBFT for the protobuf encoded block. | False | +| MaxSquareSize | 128 | Hardcoded maximum square size determined per shares per row or column for the original data square (not yet extended). | False | + +## Module parameters + +| Module.Parameter | Default | Summary | Changeable via Governance | +|-----------------------------------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|---------------------------| +| auth.MaxMemoCharacters | 256 | Largest allowed size for a memo in bytes. | True | +| auth.SigVerifyCostED25519 | 590 | Gas used to verify Ed25519 signature. | True | +| auth.SigVerifyCostSecp256k1 | 1000 | Gas used to verify secp256k1 signature. 
| True | +| auth.TxSigLimit | 7 | Max number of signatures allowed in a multisig transaction. | True | +| auth.TxSizeCostPerByte | 10 | Gas used per transaction byte. | False | +| bank.SendEnabled | true | Allow transfers. | False | +| blob.GasPerBlobByte | 8 | Gas used per blob byte. | False | +| blob.GovMaxSquareSize | 64 | Governance parameter for the maximum square size of the original data square. | True | +| consensus.block.MaxBytes | 1974272 bytes (~1.88 MiB) | Governance parameter for the maximum size of the protobuf encoded block. | True | +| consensus.block.MaxGas | -1 | Maximum gas allowed per block (-1 is infinite). | True | +| consensus.block.TimeIotaMs | 1000 | Minimum time added to the time in the header each block. | False | +| consensus.evidence.MaxAgeDuration | 1814400000000000 (21 days) | The maximum age of evidence before it is considered invalid in nanoseconds. This value should be identical to the unbonding period. | True | +| consensus.evidence.MaxAgeNumBlocks | 120960 | The maximum number of blocks before evidence is considered invalid. This value will stop CometBFT from pruning block data. | True | +| consensus.evidence.MaxBytes | 1MiB | Maximum size in bytes used by evidence in a given block. | True | +| consensus.validator.PubKeyTypes | Ed25519 | The type of public key used by validators. | False | +| consensus.Version.AppVersion | 3 | Determines protocol rules used for a given height. Incremented by the application upon an upgrade. | True | +| distribution.BaseProposerReward | 0 | Reward in the mint denomination for proposing a block. | True | +| distribution.BonusProposerReward | 0 | Extra reward in the mint denomination for proposers based on the voting power included in the commit. | True | +| distribution.CommunityTax | 0.02 (2%) | Percentage of the inflation sent to the community pool. | True | +| distribution.WithdrawAddrEnabled | true | Enables delegators to withdraw funds to a different address. | True | +| gov.DepositParams.MaxDepositPeriod | 604800000000000 (1 week) | Maximum period for token holders to deposit on a proposal in nanoseconds. | True | +| gov.DepositParams.MinDeposit | 10_000_000_000 utia (10,000 TIA) | Minimum deposit for a proposal to enter voting period. | True | +| gov.TallyParams.Quorum | 0.334 (33.4%) | Minimum percentage of total stake needed to vote for a result to be considered valid. | True | +| gov.TallyParams.Threshold | 0.50 (50%) | Minimum proportion of Yes votes for proposal to pass. | True | +| gov.TallyParams.VetoThreshold | 0.334 (33.4%) | Minimum value of Veto votes to Total votes ratio for proposal to be vetoed. | True | +| gov.VotingParams.VotingPeriod | 604800000000000 (1 week) | Duration of the voting period in nanoseconds. | True | +| ibc.ClientGenesis.AllowedClients | []string{"06-solomachine", "07-tendermint"} | List of allowed IBC light clients. | True | +| ibc.ConnectionGenesis.MaxExpectedTimePerBlock | 7500000000000 (75 seconds) | Maximum expected time per block in nanoseconds under normal operation. | True | +| ibc.Transfer.ReceiveEnabled | true | Enable receiving tokens via IBC. | True | +| ibc.Transfer.SendEnabled | true | Enable sending tokens via IBC. | True | +| icahost.HostEnabled | True | Enables or disables the Inter-Chain Accounts host module. | True | +| icahost.AllowMessages | [icaAllowMessages] | Defines a list of sdk message typeURLs allowed to be executed on a host chain. 
| True | +| minfee.NetworkMinGasPrice | 0.000001 utia | All transactions must have a gas price greater than or equal to this value. | True | +| mint.BondDenom | utia | Denomination that is inflated and sent to the distribution module account. | False | +| mint.DisinflationRate | 0.10 (10%) | The rate at which the inflation rate decreases each year. | False | +| mint.InitialInflationRate | 0.08 (8%) | The inflation rate the network starts at. | False | +| mint.TargetInflationRate | 0.015 (1.5%) | The inflation rate that the network aims to stabilize at. | False | +| packetfowardmiddleware.FeePercentage | 0 | % of the forwarded packet amount which will be subtracted and distributed to the community pool. | True | +| slashing.DowntimeJailDuration | 1 min | Duration of time a validator must stay jailed. | True | +| slashing.MinSignedPerWindow | 0.75 (75%) | The percentage of SignedBlocksWindow that must be signed not to get jailed. | True | +| slashing.SignedBlocksWindow | 5000 | The range of blocks used to count for downtime. | True | +| slashing.SlashFractionDoubleSign | 0.02 (2%) | Percentage slashed after a validator is jailed for double signing. | True | +| slashing.SlashFractionDowntime | 0.00 (0%) | Percentage slashed after a validator is jailed for downtime. | True | +| staking.BondDenom | utia | Bondable coin denomination. | False | +| staking.HistoricalEntries | 10000 | Number of historical entries to persist in store. | True | +| staking.MaxEntries | 7 | Maximum number of entries in the redelegation queue. | True | +| staking.MaxValidators | 100 | Maximum number of validators. | True | +| staking.MinCommissionRate | 0.05 (5%) | Minimum commission rate used by all validators. | True | +| staking.UnbondingTime | 1814400 (21 days) | Duration of time for unbonding in seconds. | False | + +Note: none of the mint module parameters are governance modifiable because they have been converted into hardcoded constants. See the x/mint README.md for more details. + +[icaAllowMessages]: https://github.com/rootulp/celestia-app/blob/8caa5807df8d15477554eba953bd056ae72d4503/app/ica_host.go#L3-L18 diff --git a/specs/src/shares.md b/specs/src/shares.md index ce84a8950a..59ed91c9f3 100644 --- a/specs/src/shares.md +++ b/specs/src/shares.md @@ -23,6 +23,12 @@ User submitted transactions are split into shares (see [share splitting](#share- ## Share Format +### Share Version + +The share version is a 7-bit big-endian unsigned integer that is used to indicate the version of the [share format](#share-format). A new share version MUST be introduced if the share format changes in a way that is not backwards compatible. There are two share versions [share version 0](#share-version-0) and [share version 1](#share-version-1). + +### Share Version 0 + Every share has a fixed size [`SHARE_SIZE`](./consensus.md#constants). The share format below is consistent for all shares: - The first [`NAMESPACE_VERSION_SIZE`](./consensus.md#constants) bytes of a share's raw data is the namespace version of that share (denoted by "namespace version" in the figure below). @@ -44,9 +50,17 @@ Continuation share in a sequence: Since raw data that exceeds [`SHARE_SIZE`](./consensus.md#constants)`-`[`NAMESPACE_SIZE`](./consensus.md#constants)`-`[`SHARE_INFO_BYTES`](./consensus.md#constants) `-` [`SEQUENCE_BYTES`](./consensus.md#constants) bytes will span more than one share, developers MAY choose to encode additional metadata in their raw blob data prior to inclusion in a Celestia block. 
For example, Celestia transaction shares encode additional metadata in the form of "reserved bytes". -### Share Version +### Share Version 1 + +Share version 1 is similar to share version 0 with the addition of a `signer` field. The signer is located after the sequence length in the first share. The signer is [`SIGNER_SIZE`](./consensus.md#constants) bytes. + +First share in a sequence with signer: + +![figure 3: first share with signer](./figures/first_share_with_signer.svg) + +Continuation share in a sequence: -The share version is a 7-bit big-endian unsigned integer that is used to indicate the version of the [share format](#share-format). The only supported share version is `0`. A new share version MUST be introduced if the share format changes in a way that is not backwards compatible. +![figure 4: share continuation](./figures/share_continuation.svg) ## Transaction Shares diff --git a/specs/src/specs/ante_handler.md b/specs/src/specs/ante_handler.md deleted file mode 100644 index 12710bdb85..0000000000 --- a/specs/src/specs/ante_handler.md +++ /dev/null @@ -1 +0,0 @@ -# AnteHandler diff --git a/specs/src/specs/ante_handler_v1.md b/specs/src/specs/ante_handler_v1.md deleted file mode 100644 index a5c3cb3fdf..0000000000 --- a/specs/src/specs/ante_handler_v1.md +++ /dev/null @@ -1 +0,0 @@ -# AnteHandler v1 diff --git a/specs/src/specs/ante_handler_v2.md b/specs/src/specs/ante_handler_v2.md deleted file mode 100644 index 9ebb88a269..0000000000 --- a/specs/src/specs/ante_handler_v2.md +++ /dev/null @@ -1 +0,0 @@ -# AnteHandler v2 diff --git a/specs/src/specs/block_proposer.md b/specs/src/specs/block_proposer.md deleted file mode 100644 index a29ade0863..0000000000 --- a/specs/src/specs/block_proposer.md +++ /dev/null @@ -1 +0,0 @@ -# Block Proposer diff --git a/specs/src/specs/block_validity_rules.md b/specs/src/specs/block_validity_rules.md deleted file mode 100644 index 204d783573..0000000000 --- a/specs/src/specs/block_validity_rules.md +++ /dev/null @@ -1 +0,0 @@ -# Block Validity Rules diff --git a/specs/src/specs/cat_pool.md b/specs/src/specs/cat_pool.md deleted file mode 100644 index eb6559023a..0000000000 --- a/specs/src/specs/cat_pool.md +++ /dev/null @@ -1 +0,0 @@ -# CAT Pool diff --git a/specs/src/specs/consensus.md b/specs/src/specs/consensus.md deleted file mode 100644 index 5cfc2c950c..0000000000 --- a/specs/src/specs/consensus.md +++ /dev/null @@ -1 +0,0 @@ -# Consensus diff --git a/specs/src/specs/data_square_layout.md b/specs/src/specs/data_square_layout.md deleted file mode 100644 index 4be6fe18f6..0000000000 --- a/specs/src/specs/data_square_layout.md +++ /dev/null @@ -1 +0,0 @@ -# Data Square Layout diff --git a/specs/src/specs/data_structures.md b/specs/src/specs/data_structures.md deleted file mode 100644 index c73ca16086..0000000000 --- a/specs/src/specs/data_structures.md +++ /dev/null @@ -1 +0,0 @@ -# Data Structures diff --git a/specs/src/specs/fraud_proofs.md b/specs/src/specs/fraud_proofs.md deleted file mode 100644 index 8f33083bce..0000000000 --- a/specs/src/specs/fraud_proofs.md +++ /dev/null @@ -1 +0,0 @@ -# Fraud Proofs diff --git a/specs/src/specs/multisig.md b/specs/src/specs/multisig.md deleted file mode 100644 index 206d574852..0000000000 --- a/specs/src/specs/multisig.md +++ /dev/null @@ -1 +0,0 @@ -# Multisig diff --git a/specs/src/specs/namespace.md b/specs/src/specs/namespace.md deleted file mode 100644 index aae590a94d..0000000000 --- a/specs/src/specs/namespace.md +++ /dev/null @@ -1 +0,0 @@ -# Namespace diff --git a/specs/src/specs/networking.md 
b/specs/src/specs/networking.md deleted file mode 100644 index 048579a4c1..0000000000 --- a/specs/src/specs/networking.md +++ /dev/null @@ -1 +0,0 @@ -# Networking diff --git a/specs/src/specs/parameters.md b/specs/src/specs/parameters.md deleted file mode 100644 index 04f9547d32..0000000000 --- a/specs/src/specs/parameters.md +++ /dev/null @@ -1 +0,0 @@ -# Parameters diff --git a/specs/src/specs/parameters_v1.md b/specs/src/specs/parameters_v1.md deleted file mode 100644 index eae5f5081a..0000000000 --- a/specs/src/specs/parameters_v1.md +++ /dev/null @@ -1 +0,0 @@ -# Parameters v1 diff --git a/specs/src/specs/parameters_v2.md b/specs/src/specs/parameters_v2.md deleted file mode 100644 index 0aa1503bd2..0000000000 --- a/specs/src/specs/parameters_v2.md +++ /dev/null @@ -1 +0,0 @@ -# Parameters v2 diff --git a/specs/src/specs/public_key_cryptography.md b/specs/src/specs/public_key_cryptography.md deleted file mode 100644 index 5234fcb002..0000000000 --- a/specs/src/specs/public_key_cryptography.md +++ /dev/null @@ -1 +0,0 @@ -# Public-Key Cryptography diff --git a/specs/src/specs/resource_pricing.md b/specs/src/specs/resource_pricing.md deleted file mode 100644 index 7b2572cdb6..0000000000 --- a/specs/src/specs/resource_pricing.md +++ /dev/null @@ -1 +0,0 @@ -# Resource Pricing diff --git a/specs/src/specs/shares.md b/specs/src/specs/shares.md deleted file mode 100644 index 806e046c43..0000000000 --- a/specs/src/specs/shares.md +++ /dev/null @@ -1 +0,0 @@ -# Shares diff --git a/specs/src/specs/state_machine_modules.md b/specs/src/specs/state_machine_modules.md deleted file mode 100644 index 58c122c799..0000000000 --- a/specs/src/specs/state_machine_modules.md +++ /dev/null @@ -1 +0,0 @@ -# State Machine Modules diff --git a/specs/src/specs/state_machine_modules_v1.md b/specs/src/specs/state_machine_modules_v1.md deleted file mode 100644 index ec8a32dd68..0000000000 --- a/specs/src/specs/state_machine_modules_v1.md +++ /dev/null @@ -1 +0,0 @@ -# State Machine Modules v1 diff --git a/specs/src/specs/state_machine_modules_v2.md b/specs/src/specs/state_machine_modules_v2.md deleted file mode 100644 index ce9a0bb852..0000000000 --- a/specs/src/specs/state_machine_modules_v2.md +++ /dev/null @@ -1 +0,0 @@ -# State Machine Modules v2 diff --git a/test/cmd/txsim/cli.go b/test/cmd/txsim/cli.go index 13d73003b2..4a0af58447 100644 --- a/test/cmd/txsim/cli.go +++ b/test/cmd/txsim/cli.go @@ -40,6 +40,8 @@ var ( send, sendIterations, sendAmount int stake, stakeValue, blob int useFeegrant, suppressLogs, ignoreFailures bool + upgradeSchedule string + blobShareVersion int ) func main() { @@ -101,8 +103,8 @@ well funded account that can act as the master account. The command runs until a masterAccName = os.Getenv(TxsimMasterAccName) } - if stake == 0 && send == 0 && blob == 0 { - return errors.New("no sequences specified. Use --stake, --send or --blob") + if stake == 0 && send == 0 && blob == 0 && upgradeSchedule == "" { + return errors.New("no sequences specified. Use --stake, --send, --upgrade-schedule or --blob") } // setup the sequences @@ -127,7 +129,21 @@ well funded account that can act as the master account. The command runs until a return fmt.Errorf("invalid blob amounts: %w", err) } - sequences = append(sequences, txsim.NewBlobSequence(sizes, blobsPerPFB).Clone(blob)...) + sequence := txsim.NewBlobSequence(sizes, blobsPerPFB) + if blobShareVersion >= 0 { + sequence.WithShareVersion(uint8(blobShareVersion)) + } + + sequences = append(sequences, sequence.Clone(blob)...) 
+ } + + upgradeScheduleMap, err := parseUpgradeSchedule(upgradeSchedule) + if err != nil { + return fmt.Errorf("invalid upgrade schedule: %w", err) + } + + for height, version := range upgradeScheduleMap { + sequences = append(sequences, txsim.NewUpgradeSequence(version, height)) } if seed == 0 { @@ -199,11 +215,13 @@ func flags() *flag.FlagSet { flags.IntVar(&stake, "stake", 0, "number of stake sequences to run") flags.IntVar(&stakeValue, "stake-value", 1000, "amount of initial stake per sequence") flags.IntVar(&blob, "blob", 0, "number of blob sequences to run") + flags.StringVar(&upgradeSchedule, "upgrade-schedule", "", "upgrade schedule for the network in format height:version i.e. 100:3,200:4") flags.StringVar(&blobSizes, "blob-sizes", "100-1000", "range of blob sizes to send") flags.StringVar(&blobAmounts, "blob-amounts", "1", "range of blobs per PFB specified as a single value or a min-max range (e.g., 10 or 5-10). A single value indicates the exact number of blobs to be created.") flags.BoolVar(&useFeegrant, "feegrant", false, "use the feegrant module to pay for fees") - flags.BoolVar(&suppressLogs, "suppress-logs", false, "disable logging") flags.BoolVar(&ignoreFailures, "ignore-failures", false, "ignore failures") + flags.BoolVar(&suppressLogs, "suppressLogs", false, "disable logging") + flags.IntVar(&blobShareVersion, "blob-share-version", -1, "optionally specify a share version to use for the blob sequences") return flags } @@ -229,3 +247,27 @@ func readRange(r string) (txsim.Range, error) { return txsim.NewRange(n, m), nil } + +func parseUpgradeSchedule(schedule string) (map[int64]uint64, error) { + scheduleMap := make(map[int64]uint64) + if schedule == "" { + return nil, nil + } + scheduleParts := strings.Split(schedule, ",") + for _, part := range scheduleParts { + parts := strings.Split(part, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid upgrade schedule format: %s", part) + } + height, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid height in upgrade schedule: %s", parts[0]) + } + version, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid version in upgrade schedule: %s", parts[1]) + } + scheduleMap[height] = version + } + return scheduleMap, nil +} diff --git a/test/cmd/txsim/cli_test.go b/test/cmd/txsim/cli_test.go index 3520221af0..ad6d1c2268 100644 --- a/test/cmd/txsim/cli_test.go +++ b/test/cmd/txsim/cli_test.go @@ -29,6 +29,7 @@ func TestTxsimCommandFlags(t *testing.T) { "--grpc-endpoint", grpcAddr, "--blob", "5", "--seed", "1234", + "--upgrade-schedule", "10:3", }) err := cmd.ExecuteContext(ctx) require.NoError(t, err) diff --git a/test/e2e/benchmark/benchmark.go b/test/e2e/benchmark/benchmark.go index 456f8da0dd..5af7b8539e 100644 --- a/test/e2e/benchmark/benchmark.go +++ b/test/e2e/benchmark/benchmark.go @@ -7,64 +7,90 @@ import ( "log" "time" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/test/e2e/testnet" "github.com/celestiaorg/celestia-app/v3/test/util/testnode" - "github.com/tendermint/tendermint/pkg/trace" + "github.com/celestiaorg/knuu/pkg/knuu" ) +const timeFormat = "20060102_150405" + type BenchmarkTest struct { *testnet.Testnet manifest *Manifest } +// NewBenchmarkTest wraps around testnet.New to create a new benchmark test. +// It may modify genesis consensus parameters based on manifest. 
func NewBenchmarkTest(name string, manifest *Manifest) (*BenchmarkTest, error) { - // create a new testnet - testNet, err := testnet.New(name, seed, - testnet.GetGrafanaInfoFromEnvVar(), manifest.ChainID, - manifest.GetGenesisModifiers()...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scope := fmt.Sprintf("%s_%s", name, time.Now().Format(timeFormat)) + kn, err := knuu.New(ctx, knuu.Options{ + Scope: scope, + ProxyEnabled: true, + }) if err != nil { return nil, err } + // context.Background() is used to allow the stopSignal to be functional even after this function returns + kn.HandleStopSignal(context.Background()) + + log.Printf("Knuu initialized with scope %s", kn.Scope) + + testNet, err := testnet.New(kn, testnet.Options{ + Grafana: testnet.GetGrafanaInfoFromEnvVar(), + ChainID: manifest.ChainID, + GenesisModifiers: manifest.GetGenesisModifiers(), + }) + testnet.NoError("failed to create testnet", err) + testNet.SetConsensusParams(manifest.GetConsensusParams()) return &BenchmarkTest{Testnet: testNet, manifest: manifest}, nil } // SetupNodes creates genesis nodes and tx clients based on the manifest. -// There will be manifest.Validators validators and manifest.TxClients tx clients. +// There will be manifest.Validators many validators and manifest.TxClients many tx clients. // Each tx client connects to one validator. If TxClients are fewer than Validators, some validators will not have a tx client. func (b *BenchmarkTest) SetupNodes() error { + ctx := context.Background() testnet.NoError("failed to create genesis nodes", - b.CreateGenesisNodes(b.manifest.Validators, + b.CreateGenesisNodes(ctx, b.manifest.Validators, b.manifest.CelestiaAppVersion, b.manifest.SelfDelegation, - b.manifest.UpgradeHeight, b.manifest.ValidatorResource)) + b.manifest.UpgradeHeight, b.manifest.ValidatorResource, b.manifest.DisableBBR)) // enable latency if specified in the manifest if b.manifest.EnableLatency { for _, node := range b.Nodes() { - if err := node.Instance.EnableBitTwister(); err != nil { - return fmt.Errorf("failed to enable bit twister: %v", err) - } + node.EnableNetShaper() } } // obtain the GRPC endpoints of the validators - gRPCEndpoints, err := b.RemoteGRPCEndpoints() + gRPCEndpoints, err := b.RemoteGRPCEndpoints(ctx) testnet.NoError("failed to get validators GRPC endpoints", err) log.Println("validators GRPC endpoints", gRPCEndpoints) // create tx clients and point them to the validators log.Println("Creating tx clients") - err = b.CreateTxClients(b.manifest.TxClientVersion, + err = b.CreateTxClients( + ctx, + b.manifest.TxClientVersion, b.manifest.BlobSequences, b.manifest.BlobSizes, b.manifest.BlobsPerSeq, - b.manifest.TxClientsResource, gRPCEndpoints) + b.manifest.TxClientsResource, + gRPCEndpoints, + map[int64]uint64{}, // upgrade schedule + ) testnet.NoError("failed to create tx clients", err) log.Println("Setting up testnet") - testnet.NoError("failed to setup testnet", b.Setup( + testnet.NoError("failed to setup testnet", b.Setup(ctx, testnet.WithPerPeerBandwidth(b.manifest.PerPeerBandwidth), testnet.WithTimeoutPropose(b.manifest.TimeoutPropose), testnet.WithTimeoutCommit(b.manifest.TimeoutCommit), @@ -78,21 +104,18 @@ func (b *BenchmarkTest) SetupNodes() error { log.Println("reading trace push config") if pushConfig, err := trace.GetPushConfigFromEnv(); err == nil { log.Print("Setting up trace push config") + envVars := map[string]string{ + trace.PushBucketName: pushConfig.BucketName, + trace.PushRegion: pushConfig.Region, + trace.PushAccessKey: 
pushConfig.AccessKey, + trace.PushKey: pushConfig.SecretKey, + trace.PushDelay: fmt.Sprintf("%d", pushConfig.PushDelay), + } for _, node := range b.Nodes() { - if err = node.Instance.SetEnvironmentVariable(trace.PushBucketName, pushConfig.BucketName); err != nil { - return fmt.Errorf("failed to set TRACE_PUSH_BUCKET_NAME: %v", err) - } - if err = node.Instance.SetEnvironmentVariable(trace.PushRegion, pushConfig.Region); err != nil { - return fmt.Errorf("failed to set TRACE_PUSH_REGION: %v", err) - } - if err = node.Instance.SetEnvironmentVariable(trace.PushAccessKey, pushConfig.AccessKey); err != nil { - return fmt.Errorf("failed to set TRACE_PUSH_ACCESS_KEY: %v", err) - } - if err = node.Instance.SetEnvironmentVariable(trace.PushKey, pushConfig.SecretKey); err != nil { - return fmt.Errorf("failed to set TRACE_PUSH_SECRET_KEY: %v", err) - } - if err = node.Instance.SetEnvironmentVariable(trace.PushDelay, fmt.Sprintf("%d", pushConfig.PushDelay)); err != nil { - return fmt.Errorf("failed to set TRACE_PUSH_DELAY: %v", err) + for key, value := range envVars { + if err = node.Instance.Build().SetEnvironmentVariable(key, value); err != nil { + return fmt.Errorf("failed to set %s: %v", key, err) + } } } } @@ -101,20 +124,22 @@ func (b *BenchmarkTest) SetupNodes() error { } // Run runs the benchmark test for the specified duration in the manifest. -func (b *BenchmarkTest) Run() error { +func (b *BenchmarkTest) Run(ctx context.Context) error { log.Println("Starting benchmark testnet") log.Println("Starting nodes") - err := b.StartNodes() - if err != nil { + if err := b.StartNodes(ctx); err != nil { return fmt.Errorf("failed to start testnet: %v", err) } // add latency if specified in the manifest if b.manifest.EnableLatency { for _, node := range b.Nodes() { - if err = node.Instance.SetLatencyAndJitter(b.manifest.LatencyParams. 
- Latency, b.manifest.LatencyParams.Jitter); err != nil { + err := node.SetLatencyAndJitter( + b.manifest.LatencyParams.Latency, + b.manifest.LatencyParams.Jitter, + ) + if err != nil { return fmt.Errorf("failed to set latency and jitter: %v", err) } } @@ -122,15 +147,13 @@ func (b *BenchmarkTest) Run() error { // wait for the nodes to sync log.Println("Waiting for nodes to sync") - err = b.WaitToSync() - if err != nil { + if err := b.WaitToSync(ctx); err != nil { return err } // start tx clients log.Println("Starting tx clients") - err = b.StartTxClients() - if err != nil { + if err := b.StartTxClients(ctx); err != nil { return fmt.Errorf("failed to start tx clients: %v", err) } diff --git a/test/e2e/benchmark/manifest.go b/test/e2e/benchmark/manifest.go index f130069657..b212ff85c4 100644 --- a/test/e2e/benchmark/manifest.go +++ b/test/e2e/benchmark/manifest.go @@ -78,6 +78,10 @@ type Manifest struct { UpgradeHeight int64 GovMaxSquareSize int64 + + DisableBBR bool + + GenesisAppVersion uint64 } func (m *Manifest) GetGenesisModifiers() []genesis.Modifier { @@ -86,6 +90,7 @@ func (m *Manifest) GetGenesisModifiers() []genesis.Modifier { blobParams := blobtypes.DefaultParams() blobParams.GovMaxSquareSize = uint64(m.GovMaxSquareSize) + modifiers = append(modifiers, genesis.SetBlobParams(ecfg.Codec, blobParams)) return modifiers @@ -94,6 +99,7 @@ func (m *Manifest) GetGenesisModifiers() []genesis.Modifier { func (m *Manifest) GetConsensusParams() *tmproto.ConsensusParams { cparams := app.DefaultConsensusParams() cparams.Block.MaxBytes = m.MaxBlockBytes + cparams.Version.AppVersion = m.GenesisAppVersion return cparams } @@ -103,12 +109,16 @@ func (m *Manifest) summary() string { if m.EnableLatency { latency = 1 } + bbr := 1 + if m.DisableBBR { + bbr = 0 + } maxBlockMB := m.MaxBlockBytes / testnet.MB - summary := fmt.Sprintf("v%d-t%d-b%d-bw%dmb-tc%d-tp%d-l%d-%s-%dmb", + summary := fmt.Sprintf("v%d-t%d-b%d-bw%dmb-tc%d-tp%d-l%d-%s-br%d-%dmb", m.Validators, m.TxClients, m.BlobSequences, m.PerPeerBandwidth/testnet.MB, m.TimeoutCommit/time.Second, m.TimeoutPropose/time.Second, - latency, m.Mempool, maxBlockMB) + latency, m.Mempool, bbr, maxBlockMB) if len(summary) > 50 { return summary[:50] } diff --git a/test/e2e/benchmark/throughput.go b/test/e2e/benchmark/throughput.go index 02cb764880..0827f6538e 100644 --- a/test/e2e/benchmark/throughput.go +++ b/test/e2e/benchmark/throughput.go @@ -1,15 +1,13 @@ package main import ( + "context" "log" "time" "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/test/e2e/testnet" -) - -const ( - seed = 42 + "k8s.io/apimachinery/pkg/api/resource" ) var bigBlockManifest = Manifest{ @@ -17,16 +15,16 @@ var bigBlockManifest = Manifest{ Validators: 2, TxClients: 2, ValidatorResource: testnet.Resources{ - MemoryRequest: "12Gi", - MemoryLimit: "12Gi", - CPU: "8", - Volume: "20Gi", + MemoryRequest: resource.MustParse("12Gi"), + MemoryLimit: resource.MustParse("12Gi"), + CPU: resource.MustParse("8"), + Volume: resource.MustParse("20Gi"), }, TxClientsResource: testnet.Resources{ - MemoryRequest: "1Gi", - MemoryLimit: "3Gi", - CPU: "2", - Volume: "1Gi", + MemoryRequest: resource.MustParse("1Gi"), + MemoryLimit: resource.MustParse("3Gi"), + CPU: resource.MustParse("2"), + Volume: resource.MustParse("1Gi"), }, SelfDelegation: 10000000, // @TODO Update the CelestiaAppVersion and TxClientVersion to the latest @@ -51,6 +49,7 @@ var bigBlockManifest = Manifest{ TestDuration: 5 * time.Minute, LocalTracingType: "local", PushTrace: true, + 
DisableBBR: true, } func TwoNodeSimple(logger *log.Logger) error { @@ -88,19 +87,23 @@ func TwoNodeSimple(logger *log.Logger) error { DownloadTraces: true, TestDuration: 3 * time.Minute, TxClients: 2, + DisableBBR: true, } benchTest, err := NewBenchmarkTest(testName, &manifest) testnet.NoError("failed to create benchmark test", err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + defer func() { log.Print("Cleaning up testnet") - benchTest.Cleanup() + benchTest.Cleanup(ctx) }() testnet.NoError("failed to setup nodes", benchTest.SetupNodes()) - testnet.NoError("failed to run the benchmark test", benchTest.Run()) + testnet.NoError("failed to run the benchmark test", benchTest.Run(ctx)) testnet.NoError("failed to check results", benchTest.CheckResults(1*testnet.MB)) @@ -114,13 +117,16 @@ func runBenchmarkTest(logger *log.Logger, testName string, manifest Manifest) er benchTest, err := NewBenchmarkTest(testName, &manifest) testnet.NoError("failed to create benchmark test", err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + defer func() { log.Print("Cleaning up testnet") - benchTest.Cleanup() + benchTest.Cleanup(ctx) }() testnet.NoError("failed to setup nodes", benchTest.SetupNodes()) - testnet.NoError("failed to run the benchmark test", benchTest.Run()) + testnet.NoError("failed to run the benchmark test", benchTest.Run(ctx)) expectedBlockSize := int64(0.90 * float64(manifest.MaxBlockBytes)) testnet.NoError("failed to check results", benchTest.CheckResults(expectedBlockSize)) diff --git a/test/e2e/experiment/compact_blocks/main.go b/test/e2e/experiment/compact_blocks/main.go index cb106b714b..0e6825c2ae 100644 --- a/test/e2e/experiment/compact_blocks/main.go +++ b/test/e2e/experiment/compact_blocks/main.go @@ -12,6 +12,7 @@ import ( "github.com/celestiaorg/celestia-app/v3/test/e2e/testnet" "github.com/celestiaorg/celestia-app/v3/test/util/genesis" blobtypes "github.com/celestiaorg/celestia-app/v3/x/blob/types" + "github.com/celestiaorg/knuu/pkg/knuu" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/pkg/trace" @@ -19,7 +20,7 @@ import ( ) const ( - compactBlocksVersion = "70e7354" //"a28b9e7" + compactBlocksVersion = "70e7354" ) func main() { @@ -34,6 +35,7 @@ func Run() error { timeoutCommit = 3 * time.Second timeoutPropose = 4 * time.Second version = compactBlocksVersion + timeFormat = "20060102_150405" ) blobParams := blobtypes.DefaultParams() @@ -41,39 +43,50 @@ func Run() error { blobParams.GovMaxSquareSize = 128 ecfg := encoding.MakeConfig(app.ModuleBasics) - network, err := testnet.New("compact-blocks", 864, nil, "", genesis.SetBlobParams(ecfg.Codec, blobParams)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + identifier := fmt.Sprintf("%s_%s", "compact-blocks", time.Now().Format(timeFormat)) + kn, err := knuu.New(ctx, knuu.Options{ + Scope: identifier, + ProxyEnabled: true, + }) + testnet.NoError("failed to initialize Knuu", err) + + network, err := testnet.New(kn, testnet.Options{ + GenesisModifiers: []genesis.Modifier{ + genesis.SetBlobParams(ecfg.Codec, blobParams), + }, + ChainID: identifier, + }) if err != nil { return err } - defer network.Cleanup() + defer network.Cleanup(ctx) cparams := app.DefaultConsensusParams() cparams.Block.MaxBytes = 8 * 1024 * 1024 network.SetConsensusParams(cparams) - err = network.CreateGenesisNodes(nodes, version, 10000000, 0, testnet.DefaultResources) + err = network.CreateGenesisNodes(ctx, nodes, version, 10000000, 0, 
testnet.DefaultResources, true) if err != nil { return err } - for _, node := range network.Nodes() { - if err := node.Instance.EnableBitTwister(); err != nil { - return fmt.Errorf("failed to enable bit twister: %v", err) - } - } - - gRPCEndpoints, err := network.RemoteGRPCEndpoints() + gRPCEndpoints, err := network.RemoteGRPCEndpoints(ctx) if err != nil { return err } err = network.CreateTxClients( + ctx, compactBlocksVersion, 40, "128000-256000", 1, testnet.DefaultResources, gRPCEndpoints[:2], + map[int64]uint64{}, ) if err != nil { return err @@ -81,6 +94,7 @@ func Run() error { log.Printf("Setting up network\n") err = network.Setup( + ctx, testnet.WithTimeoutCommit(timeoutCommit), testnet.WithTimeoutPropose(timeoutPropose), testnet.WithMempool("v2"), @@ -109,43 +123,29 @@ func Run() error { } log.Print("Setting up trace push config") for _, node := range network.Nodes() { - if err = node.Instance.SetEnvironmentVariable(trace.PushBucketName, pushConfig.BucketName); err != nil { + if err = node.Instance.Build().SetEnvironmentVariable(trace.PushBucketName, pushConfig.BucketName); err != nil { return fmt.Errorf("failed to set TRACE_PUSH_BUCKET_NAME: %v", err) } - if err = node.Instance.SetEnvironmentVariable(trace.PushRegion, pushConfig.Region); err != nil { + if err = node.Instance.Build().SetEnvironmentVariable(trace.PushRegion, pushConfig.Region); err != nil { return fmt.Errorf("failed to set TRACE_PUSH_REGION: %v", err) } - if err = node.Instance.SetEnvironmentVariable(trace.PushAccessKey, pushConfig.AccessKey); err != nil { + if err = node.Instance.Build().SetEnvironmentVariable(trace.PushAccessKey, pushConfig.AccessKey); err != nil { return fmt.Errorf("failed to set TRACE_PUSH_ACCESS_KEY: %v", err) } - if err = node.Instance.SetEnvironmentVariable(trace.PushKey, pushConfig.SecretKey); err != nil { + if err = node.Instance.Build().SetEnvironmentVariable(trace.PushKey, pushConfig.SecretKey); err != nil { return fmt.Errorf("failed to set TRACE_PUSH_SECRET_KEY: %v", err) } - if err = node.Instance.SetEnvironmentVariable(trace.PushDelay, fmt.Sprintf("%d", pushConfig.PushDelay)); err != nil { + if err = node.Instance.Build().SetEnvironmentVariable(trace.PushDelay, fmt.Sprintf("%d", pushConfig.PushDelay)); err != nil { return fmt.Errorf("failed to set TRACE_PUSH_DELAY: %v", err) } } log.Printf("Starting network\n") - err = network.StartNodes() + err = network.Start(ctx) if err != nil { return err } - if err := network.WaitToSync(); err != nil { - return err - } - - for _, node := range network.Nodes() { - if err = node.Instance.SetLatencyAndJitter(40, 10); err != nil { - return fmt.Errorf("failed to set latency and jitter: %v", err) - } - } - - if err := network.StartTxClients(); err != nil { - return err - } - // run the test for 5 minutes heightTicker := time.NewTicker(20 * time.Second) timeout := time.NewTimer(5 * time.Minute) @@ -164,14 +164,13 @@ func Run() error { log.Printf("Height: %v", status.SyncInfo.LatestBlockHeight) case <-timeout.C: - network.StopTxClients() log.Println("--- COLLECTING DATA") file := "/Users/callum/Developer/go/src/github.com/celestiaorg/big-blocks-research/traces" - if err := trace.S3Download(file, network.ChainID(), pushConfig, schema.RoundStateTable, schema.BlockTable, schema.ProposalTable, schema.CompactBlockTable, schema.MempoolRecoveryTable); err != nil { + if err := trace.S3Download(file, identifier, pushConfig, schema.RoundStateTable, schema.BlockTable, schema.ProposalTable, schema.CompactBlockTable, schema.MempoolRecoveryTable); err != nil { return 
fmt.Errorf("failed to download traces from S3: %w", err) } - log.Println("--- FINISHED ✅: ChainID: ", network.ChainID()) + log.Println("--- FINISHED ✅: ChainID: ", identifier) return nil } } diff --git a/test/e2e/main.go b/test/e2e/main.go index c133e00a6d..768907e2cf 100644 --- a/test/e2e/main.go +++ b/test/e2e/main.go @@ -7,10 +7,10 @@ import ( ) const ( - seed = 42 + timeFormat = "20060102_150405" ) -type TestFunc func(*log.Logger) error +type TestFunc func(logger *log.Logger) error type Test struct { Name string @@ -23,6 +23,7 @@ func main() { tests := []Test{ {"MinorVersionCompatibility", MinorVersionCompatibility}, {"MajorUpgradeToV2", MajorUpgradeToV2}, + {"MajorUpgradeToV3", MajorUpgradeToV3}, {"E2ESimple", E2ESimple}, } diff --git a/test/e2e/major_upgrade_v2.go b/test/e2e/major_upgrade_v2.go index f595be542d..3e44eb0bc3 100644 --- a/test/e2e/major_upgrade_v2.go +++ b/test/e2e/major_upgrade_v2.go @@ -17,46 +17,59 @@ import ( ) func MajorUpgradeToV2(logger *log.Logger) error { - latestVersion, err := testnet.GetLatestVersion() - testnet.NoError("failed to get latest version", err) - - logger.Println("Running major upgrade to v2 test", "version", latestVersion) - + testName := "MajorUpgradeToV2" numNodes := 4 upgradeHeight := int64(10) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() + scope := fmt.Sprintf("%s_%s", testName, time.Now().Format(timeFormat)) + kn, err := knuu.New(ctx, knuu.Options{ + Scope: scope, + ProxyEnabled: true, + }) + testnet.NoError("failed to initialize Knuu", err) + + kn.HandleStopSignal(ctx) + logger.Printf("Knuu initialized with scope %s", kn.Scope) + logger.Println("Creating testnet") - testNet, err := testnet.New("runMajorUpgradeToV2", seed, nil, "test") + testNet, err := testnet.New(kn, testnet.Options{}) testnet.NoError("failed to create testnet", err) - defer testNet.Cleanup() + defer testNet.Cleanup(ctx) + + latestVersion, err := testnet.GetLatestVersion() + testnet.NoError("failed to get latest version", err) + + logger.Printf("Running %s test with version %s", testName, latestVersion) testNet.SetConsensusParams(app.DefaultInitialConsensusParams()) - preloader, err := knuu.NewPreloader() + preloader, err := testNet.NewPreloader() testnet.NoError("failed to create preloader", err) - defer func() { _ = preloader.EmptyImages() }() - testnet.NoError("failed to add image", preloader.AddImage(testnet.DockerImageName(latestVersion))) + defer func() { _ = preloader.EmptyImages(ctx) }() + testnet.NoError("failed to add image", preloader.AddImage(ctx, testnet.DockerImageName(latestVersion))) logger.Println("Creating genesis nodes") for i := 0; i < numNodes; i++ { - err := testNet.CreateGenesisNode(latestVersion, 10000000, upgradeHeight, testnet.DefaultResources) + err := testNet.CreateGenesisNode(ctx, latestVersion, 10000000, upgradeHeight, testnet.DefaultResources, true) testnet.NoError("failed to create genesis node", err) } logger.Println("Creating txsim") - endpoints, err := testNet.RemoteGRPCEndpoints() + endpoints, err := testNet.RemoteGRPCEndpoints(ctx) testnet.NoError("failed to get remote gRPC endpoints", err) - err = testNet.CreateTxClient("txsim", testnet.TxsimVersion, 1, "100-2000", 100, testnet.DefaultResources, endpoints[0]) + upgradeSchedule := map[int64]uint64{} + err = testNet.CreateTxClient(ctx, "txsim", testnet.TxsimVersion, 1, "100-2000", 100, testnet.DefaultResources, endpoints[0], upgradeSchedule) testnet.NoError("failed to create tx client", err) logger.Println("Setting up testnet") - testnet.NoError("Failed to 
setup testnet", testNet.Setup()) + testnet.NoError("Failed to setup testnet", testNet.Setup(ctx)) logger.Println("Starting testnet") - testnet.NoError("Failed to start testnet", testNet.Start()) + testnet.NoError("Failed to start testnet", testNet.Start(ctx)) heightBefore := upgradeHeight - 1 for i := 0; i < numNodes; i++ { @@ -90,7 +103,7 @@ func MajorUpgradeToV2(logger *log.Logger) error { return fmt.Errorf("failed to get height: %w", err) } - if err := node.Upgrade(latestVersion); err != nil { + if err := node.Upgrade(ctx, latestVersion); err != nil { return fmt.Errorf("failed to restart node: %w", err) } diff --git a/test/e2e/major_upgrade_v3.go b/test/e2e/major_upgrade_v3.go new file mode 100644 index 0000000000..35cb79b045 --- /dev/null +++ b/test/e2e/major_upgrade_v3.go @@ -0,0 +1,190 @@ +package main + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" + "github.com/celestiaorg/celestia-app/v3/test/e2e/testnet" + "github.com/celestiaorg/knuu/pkg/knuu" + tmtypes "github.com/tendermint/tendermint/types" +) + +func MajorUpgradeToV3(logger *log.Logger) error { + testName := "MajorUpgradeToV3" + numNodes := 4 + upgradeHeightV3 := int64(40) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + scope := fmt.Sprintf("%s_%s", testName, time.Now().Format(timeFormat)) + kn, err := knuu.New(ctx, knuu.Options{ + Scope: scope, + ProxyEnabled: true, + }) + testnet.NoError("failed to initialize Knuu", err) + + kn.HandleStopSignal(ctx) + logger.Printf("Knuu initialized with scope %s", kn.Scope) + + logger.Println("Creating testnet") + testNet, err := testnet.New(kn, testnet.Options{}) + testnet.NoError("failed to create testnet", err) + + defer testNet.Cleanup(ctx) + + latestVersion, err := testnet.GetLatestVersion() + testnet.NoError("failed to get latest version", err) + + consensusParams := app.DefaultConsensusParams() + consensusParams.Version.AppVersion = v2.Version // Start the test on v2 + testNet.SetConsensusParams(consensusParams) + + preloader, err := testNet.NewPreloader() + testnet.NoError("failed to create preloader", err) + + err = preloader.AddImage(ctx, testnet.DockerImageName(latestVersion)) + testnet.NoError("failed to add image", err) + defer func() { _ = preloader.EmptyImages(ctx) }() + + logger.Println("Creating genesis nodes") + for i := 0; i < numNodes; i++ { + err := testNet.CreateGenesisNode(ctx, latestVersion, 10000000, 0, testnet.DefaultResources, true) + testnet.NoError("failed to create genesis node", err) + } + + logger.Println("Creating txsim") + endpoints, err := testNet.RemoteGRPCEndpoints(ctx) + testnet.NoError("failed to get remote gRPC endpoints", err) + upgradeSchedule := map[int64]uint64{ + upgradeHeightV3: v3.Version, + } + + err = testNet.CreateTxClient(ctx, "txsim", latestVersion, 1, "100-2000", 100, testnet.DefaultResources, endpoints[0], upgradeSchedule) + testnet.NoError("failed to create tx client", err) + + logger.Println("Setting up testnet") + testnet.NoError("Failed to setup testnet", testNet.Setup(ctx)) + logger.Println("Starting testnet") + testnet.NoError("Failed to start testnet", testNet.Start(ctx)) + + timer := time.NewTimer(20 * time.Minute) + defer timer.Stop() + ticker := time.NewTicker(3 * time.Second) + defer ticker.Stop() + + logger.Println("waiting for upgrade") + + // wait for the 
upgrade to complete + var upgradedHeight int64 + for _, node := range testNet.Nodes() { + client, err := node.Client() + testnet.NoError("failed to get client", err) + upgradeComplete := false + lastHeight := int64(0) + for !upgradeComplete { + select { + case <-timer.C: + return fmt.Errorf("failed to upgrade to v3, last height: %d", lastHeight) + case <-ticker.C: + resp, err := client.Header(ctx, nil) + testnet.NoError("failed to get header", err) + if resp.Header.Version.App == v3.Version { + upgradeComplete = true + if upgradedHeight == 0 { + upgradedHeight = resp.Header.Height + } + } + logger.Printf("height %v", resp.Header.Height) + lastHeight = resp.Header.Height + } + } + } + + // check if the timeouts are set correctly + rpcNode := testNet.Nodes()[0] + client, err := rpcNode.Client() + testnet.NoError("failed to get client", err) + + startHeight := upgradeHeightV3 - 5 + endHeight := upgradedHeight + 5 + + type versionDuration struct { + dur time.Duration + block *tmtypes.Block + } + + blockSummaries := make([]versionDuration, 0, endHeight-startHeight) + var prevBlockTime time.Time + + for h := startHeight; h < endHeight; h++ { + resp, err := client.Block(ctx, &h) + testnet.NoError("failed to get header", err) + blockTime := resp.Block.Time + + if h == startHeight { + if resp.Block.Version.App != v2.Version { + return fmt.Errorf("expected start height %v was app version 2", startHeight) + } + prevBlockTime = blockTime + continue + } + + blockDur := blockTime.Sub(prevBlockTime) + prevBlockTime = blockTime + blockSummaries = append(blockSummaries, versionDuration{dur: blockDur, block: resp.Block}) + } + + preciseUpgradeHeight := 0 + multipleRounds := 0 + for _, b := range blockSummaries { + + // check for the precise upgrade height and skip, as the block time + // won't match due to the off by 1 nature of the block time. 
+ if b.block.Version.App == v3.Version && preciseUpgradeHeight == 0 { + preciseUpgradeHeight = int(b.block.Height) + continue + } + + // don't test heights with multiple rounds as the times are off and fail + // later if there are too many + if b.block.LastCommit.Round > 0 { + multipleRounds++ + continue + } + + if b.dur < appconsts.GetTimeoutCommit(b.block.Version.App) { + return fmt.Errorf( + "block was too fast for corresponding version: version %v duration %v upgrade height %v height %v", + b.block.Version.App, + b.dur, + preciseUpgradeHeight, + b.block.Height, + ) + } + + // check if the time decreased for v3 + if b.block.Version.App == v3.Version && b.dur > appconsts.GetTimeoutCommit(b.block.Version.App)+5 { + return fmt.Errorf( + "block was too slow for corresponding version: version %v duration %v upgrade height %v height %v", + b.block.Version.App, + b.dur, + preciseUpgradeHeight, + b.block.Height, + ) + } + + } + + if multipleRounds > 2 { + return fmt.Errorf("too many multiple rounds for test to be reliable: %d", multipleRounds) + } + + return nil +} diff --git a/test/e2e/minor_version_compatibility.go b/test/e2e/minor_version_compatibility.go index d0fd6314e9..bb81d5e021 100644 --- a/test/e2e/minor_version_compatibility.go +++ b/test/e2e/minor_version_compatibility.go @@ -19,6 +19,11 @@ import ( ) func MinorVersionCompatibility(logger *log.Logger) error { + const ( + testName = "MinorVersionCompatibility" + numNodes = 4 + ) + versionStr, err := getAllVersions() testnet.NoError("failed to get versions", err) versions1 := testnet.ParseVersions(versionStr).FilterMajor(v1.Version).FilterOutReleaseCandidates() @@ -28,47 +33,60 @@ func MinorVersionCompatibility(logger *log.Logger) error { if len(versions) == 0 { logger.Fatal("no versions to test") } - numNodes := 4 + seed := testnet.DefaultSeed r := rand.New(rand.NewSource(seed)) - logger.Println("Running minor version compatibility test", "versions", versions) - - testNet, err := testnet.New("runMinorVersionCompatibility", seed, nil, "test") - testnet.NoError("failed to create testnet", err) + logger.Printf("Running %s test with versions %s", testName, versions) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - defer testNet.Cleanup() + identifier := fmt.Sprintf("%s_%s", testName, time.Now().Format(timeFormat)) + kn, err := knuu.New(ctx, knuu.Options{ + Scope: identifier, + ProxyEnabled: true, + }) + testnet.NoError("failed to initialize Knuu", err) + + kn.HandleStopSignal(ctx) + logger.Printf("Knuu initialized with scope %s", kn.Scope) + + testNet, err := testnet.New(kn, testnet.Options{Seed: seed}) + testnet.NoError("failed to create testnet", err) + + defer testNet.Cleanup(ctx) testNet.SetConsensusParams(app.DefaultInitialConsensusParams()) // preload all docker images - preloader, err := knuu.NewPreloader() + preloader, err := testNet.NewPreloader() testnet.NoError("failed to create preloader", err) - defer func() { _ = preloader.EmptyImages() }() + defer func() { _ = preloader.EmptyImages(ctx) }() for _, v := range versions { - testnet.NoError("failed to add image", preloader.AddImage(testnet.DockerImageName(v.String()))) + testnet.NoError("failed to add image", preloader.AddImage(ctx, testnet.DockerImageName(v.String()))) } for i := 0; i < numNodes; i++ { // each node begins with a random version within the same major version set v := versions.Random(r).String() logger.Println("Starting node", "node", i, "version", v) - testnet.NoError("failed to create genesis node", testNet.CreateGenesisNode(v, 
10000000, 0, testnet.DefaultResources)) + + testnet.NoError("failed to create genesis node", + testNet.CreateGenesisNode(ctx, v, 10000000, 0, testnet.DefaultResources, false)) } logger.Println("Creating txsim") - endpoints, err := testNet.RemoteGRPCEndpoints() + endpoints, err := testNet.RemoteGRPCEndpoints(ctx) testnet.NoError("failed to get remote gRPC endpoints", err) - err = testNet.CreateTxClient("txsim", testnet.TxsimVersion, 1, "100-2000", 100, testnet.DefaultResources, endpoints[0]) + upgradeSchedule := map[int64]uint64{} + err = testNet.CreateTxClient(ctx, "txsim", testnet.TxsimVersion, 1, "100-2000", 100, testnet.DefaultResources, endpoints[0], upgradeSchedule) testnet.NoError("failed to create tx client", err) // start the testnet logger.Println("Setting up testnet") - testnet.NoError("Failed to setup testnet", testNet.Setup()) + testnet.NoError("Failed to setup testnet", testNet.Setup(ctx)) logger.Println("Starting testnet") - testnet.NoError("Failed to start testnet", testNet.Start()) + testnet.NoError("Failed to start testnet", testNet.Start(ctx)) for i := 0; i < len(versions)*2; i++ { // FIXME: skip the first node because we need them available to @@ -84,10 +102,10 @@ func MinorVersionCompatibility(logger *log.Logger) error { newVersion := versions.Random(r).String() logger.Println("Upgrading node", "node", i%numNodes+1, "version", newVersion) - testnet.NoError("failed to upgrade node", testNet.Node(i%numNodes).Upgrade(newVersion)) + testnet.NoError("failed to upgrade node", testNet.Node(i%numNodes).Upgrade(ctx, newVersion)) time.Sleep(10 * time.Second) // wait for the node to reach two more heights - testnet.NoError("failed to wait for height", waitForHeight(ctx, client, heightBefore+2, 30*time.Second)) + testnet.NoError("failed to wait for height", waitForHeight(ctx, client, heightBefore+2, time.Minute)) } heights := make([]int64, 4) diff --git a/test/e2e/simple.go b/test/e2e/simple.go index 63f328f141..1a9bad1eb5 100644 --- a/test/e2e/simple.go +++ b/test/e2e/simple.go @@ -9,43 +9,60 @@ import ( "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/test/e2e/testnet" "github.com/celestiaorg/celestia-app/v3/test/util/testnode" + "github.com/celestiaorg/knuu/pkg/knuu" ) -// This test runs a simple testnet with 4 validators. It submits both MsgPayForBlobs +// E2ESimple runs a simple testnet with 4 validators. It submits both MsgPayForBlobs // and MsgSends over 30 seconds and then asserts that at least 10 transactions were // committed. 
func E2ESimple(logger *log.Logger) error { - latestVersion, err := testnet.GetLatestVersion() - testnet.NoError("failed to get latest version", err) + const testName = "E2ESimple" + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - logger.Println("Running simple e2e test", "version", latestVersion) + identifier := fmt.Sprintf("%s_%s", testName, time.Now().Format(timeFormat)) + kn, err := knuu.New(ctx, knuu.Options{ + Scope: identifier, + ProxyEnabled: true, + }) + testnet.NoError("failed to initialize Knuu", err) + kn.HandleStopSignal(ctx) + logger.Printf("Knuu initialized with scope %s", kn.Scope) - testNet, err := testnet.New("E2ESimple", seed, nil, "test") + testNet, err := testnet.New(kn, testnet.Options{}) testnet.NoError("failed to create testnet", err) - defer testNet.Cleanup() + defer testNet.Cleanup(ctx) + + latestVersion, err := testnet.GetLatestVersion() + testnet.NoError("failed to get latest version", err) + logger.Printf("Running %s test with version %s", testName, latestVersion) logger.Println("Creating testnet validators") - testnet.NoError("failed to create genesis nodes", testNet.CreateGenesisNodes(4, latestVersion, 10000000, 0, testnet.DefaultResources)) + testnet.NoError("failed to create genesis nodes", + testNet.CreateGenesisNodes(ctx, 4, latestVersion, 10000000, 0, testnet.DefaultResources, true)) logger.Println("Creating txsim") - endpoints, err := testNet.RemoteGRPCEndpoints() + endpoints, err := testNet.RemoteGRPCEndpoints(ctx) testnet.NoError("failed to get remote gRPC endpoints", err) - err = testNet.CreateTxClient("txsim", testnet.TxsimVersion, 10, "100-2000", 1, testnet.DefaultResources, endpoints[0]) + upgradeSchedule := map[int64]uint64{} + err = testNet.CreateTxClient(ctx, "txsim", testnet.TxsimVersion, 10, "100-2000", 100, testnet.DefaultResources, endpoints[0], upgradeSchedule) testnet.NoError("failed to create tx client", err) logger.Println("Setting up testnets") - testnet.NoError("failed to setup testnets", testNet.Setup()) + testnet.NoError("failed to setup testnets", testNet.Setup(ctx)) logger.Println("Starting testnets") - testnet.NoError("failed to start testnets", testNet.Start()) + testnet.NoError("failed to start testnets", testNet.Start(ctx)) logger.Println("Waiting for 30 seconds to produce blocks") // wait for 30 seconds time.Sleep(30 * time.Second) - blockchain, err := testnode.ReadBlockchainHeaders(context.Background(), testNet.Node(0).AddressRPC()) - testnet.NoError("failed to read blockchain", err) + logger.Println("Reading blockchain headers") + blockchain, err := testnode.ReadBlockchainHeaders(ctx, testNet.Node(0).AddressRPC()) + testnet.NoError("failed to read blockchain headers", err) totalTxs := 0 for _, blockMeta := range blockchain { diff --git a/test/e2e/testnet/defaults.go b/test/e2e/testnet/defaults.go index 5f7209c5d8..6c6da2cbf9 100644 --- a/test/e2e/testnet/defaults.go +++ b/test/e2e/testnet/defaults.go @@ -1,10 +1,12 @@ package testnet +import "k8s.io/apimachinery/pkg/api/resource" + var DefaultResources = Resources{ - MemoryRequest: "3Gi", - MemoryLimit: "3Gi", - CPU: "1000m", - Volume: "2Gi", + MemoryRequest: resource.MustParse("400Mi"), + MemoryLimit: resource.MustParse("400Mi"), + CPU: resource.MustParse("300m"), + Volume: resource.MustParse("1Gi"), } const ( diff --git a/test/e2e/testnet/key_generator.go b/test/e2e/testnet/key_generator.go new file mode 100644 index 0000000000..cfaa39e866 --- /dev/null +++ b/test/e2e/testnet/key_generator.go @@ -0,0 +1,37 @@ +package testnet + +import ( + "io" 
+ "math/rand" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" +) + +type keyGenerator struct { + random *rand.Rand +} + +func newKeyGenerator(seed int64) *keyGenerator { + return &keyGenerator{ + random: rand.New(rand.NewSource(seed)), //nolint:gosec + } +} + +func (g *keyGenerator) Generate(keyType string) crypto.PrivKey { + seed := make([]byte, ed25519.SeedSize) + + _, err := io.ReadFull(g.random, seed) + if err != nil { + panic(err) // this shouldn't happen + } + switch keyType { + case "secp256k1": + return secp256k1.GenPrivKeySecp256k1(seed) + case "", "ed25519": + return ed25519.GenPrivKeyFromSecret(seed) + default: + panic("KeyType not supported") // should not make it this far + } +} diff --git a/test/e2e/testnet/node.go b/test/e2e/testnet/node.go index 409bbbce7b..cf96ee905d 100644 --- a/test/e2e/testnet/node.go +++ b/test/e2e/testnet/node.go @@ -2,12 +2,11 @@ package testnet import ( + "context" "fmt" "os" "path/filepath" - "github.com/celestiaorg/celestia-app/v3/test/util/genesis" - "github.com/celestiaorg/knuu/pkg/knuu" serverconfig "github.com/cosmos/cosmos-sdk/server/config" "github.com/rs/zerolog/log" "github.com/tendermint/tendermint/config" @@ -18,6 +17,13 @@ import ( "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/types" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/celestiaorg/celestia-app/v3/test/util/genesis" + "github.com/celestiaorg/knuu/pkg/instance" + "github.com/celestiaorg/knuu/pkg/knuu" + "github.com/celestiaorg/knuu/pkg/sidecars/netshaper" + "github.com/celestiaorg/knuu/pkg/sidecars/observability" ) const ( @@ -41,7 +47,9 @@ type Node struct { SignerKey crypto.PrivKey NetworkKey crypto.PrivKey SelfDelegation int64 - Instance *knuu.Instance + Instance *instance.Instance + sidecars []instance.SidecarManager + netShaper *netshaper.NetShaper // a reference to the netshaper sidecar rpcProxyHost string // FIXME: This does not work currently with the reverse proxy @@ -51,8 +59,7 @@ type Node struct { // PullRoundStateTraces retrieves the round state traces from a node. // It will save them to the provided path. -func (n *Node) PullRoundStateTraces(path string) ([]trace.Event[schema.RoundState], error, -) { +func (n *Node) PullRoundStateTraces(path string) ([]trace.Event[schema.RoundState], error) { addr := n.AddressTracing() log.Info().Str("Address", addr).Msg("Pulling round state traces") @@ -65,8 +72,7 @@ func (n *Node) PullRoundStateTraces(path string) ([]trace.Event[schema.RoundStat // PullBlockSummaryTraces retrieves the block summary traces from a node. // It will save them to the provided path. -func (n *Node) PullBlockSummaryTraces(path string) ([]trace.Event[schema.BlockSummary], error, -) { +func (n *Node) PullBlockSummaryTraces(path string) ([]trace.Event[schema.BlockSummary], error) { addr := n.AddressTracing() log.Info().Str("Address", addr).Msg("Pulling block summary traces") @@ -80,96 +86,114 @@ func (n *Node) PullBlockSummaryTraces(path string) ([]trace.Event[schema.BlockSu // Resources defines the resource requirements for a Node. type Resources struct { // MemoryRequest specifies the initial memory allocation for the Node. - MemoryRequest string + MemoryRequest resource.Quantity // MemoryLimit specifies the maximum memory allocation for the Node. - MemoryLimit string + MemoryLimit resource.Quantity // CPU specifies the CPU allocation for the Node. 
- CPU string + CPU resource.Quantity // Volume specifies the storage volume allocation for the Node. - Volume string + Volume resource.Quantity } func NewNode( - name, version string, - startHeight, selfDelegation int64, + ctx context.Context, + name string, + version string, + startHeight int64, + selfDelegation int64, peers []string, - signerKey, networkKey crypto.PrivKey, - upgradeHeight int64, + signerKey crypto.PrivKey, + networkKey crypto.PrivKey, + upgradeHeightV2 int64, resources Resources, grafana *GrafanaInfo, + kn *knuu.Knuu, + disableBBR bool, ) (*Node, error) { - instance, err := knuu.NewInstance(name) + knInstance, err := kn.NewInstance(name) if err != nil { return nil, err } - err = instance.SetImage(DockerImageName(version)) + err = knInstance.Build().SetImage(ctx, DockerImageName(version)) if err != nil { return nil, err } - if err := instance.AddPortTCP(rpcPort); err != nil { - return nil, err - } - if err := instance.AddPortTCP(p2pPort); err != nil { - return nil, err - } - if err := instance.AddPortTCP(grpcPort); err != nil { - return nil, err - } - if err := instance.AddPortTCP(tracingPort); err != nil { - return nil, err + for _, port := range []int{rpcPort, p2pPort, grpcPort, tracingPort} { + if err := knInstance.Network().AddPortTCP(port); err != nil { + return nil, err + } } + var sidecars []instance.SidecarManager if grafana != nil { + obsySc := observability.New() + // add support for metrics - if err := instance.SetPrometheusEndpoint(prometheusPort, fmt.Sprintf("knuu-%s", knuu.Scope()), "1m"); err != nil { + if err := obsySc.SetPrometheusEndpoint(prometheusPort, fmt.Sprintf("knuu-%s", kn.Scope), "1m"); err != nil { return nil, fmt.Errorf("setting prometheus endpoint: %w", err) } - if err := instance.SetJaegerEndpoint(14250, 6831, 14268); err != nil { + if err := obsySc.SetJaegerEndpoint(14250, 6831, 14268); err != nil { return nil, fmt.Errorf("error setting jaeger endpoint: %v", err) } - if err := instance.SetOtlpExporter(grafana.Endpoint, grafana.Username, grafana.Token); err != nil { + if err := obsySc.SetOtlpExporter(grafana.Endpoint, grafana.Username, grafana.Token); err != nil { return nil, fmt.Errorf("error setting otlp exporter: %v", err) } - if err := instance.SetJaegerExporter("jaeger-collector.jaeger-cluster.svc.cluster.local:14250"); err != nil { + if err := obsySc.SetJaegerExporter("jaeger-collector.jaeger-cluster.svc.cluster.local:14250"); err != nil { return nil, fmt.Errorf("error setting jaeger exporter: %v", err) } + sidecars = append(sidecars, obsySc) } - err = instance.SetMemory(resources.MemoryRequest, resources.MemoryLimit) + err = knInstance.Resources().SetMemory(resources.MemoryRequest, resources.MemoryLimit) if err != nil { return nil, err } - err = instance.SetCPU(resources.CPU) + err = knInstance.Resources().SetCPU(resources.CPU) if err != nil { return nil, err } - err = instance.AddVolumeWithOwner(remoteRootDir, resources.Volume, 10001) + err = knInstance.Storage().AddVolumeWithOwner(remoteRootDir, resources.Volume, 10001) if err != nil { return nil, err } - args := []string{"start", fmt.Sprintf("--home=%s", remoteRootDir), "--rpc.laddr=tcp://0.0.0.0:26657", "--force-no-bbr"} - if upgradeHeight != 0 { - args = append(args, fmt.Sprintf("--v2-upgrade-height=%d", upgradeHeight)) + args := []string{"start", fmt.Sprintf("--home=%s", remoteRootDir), "--rpc.laddr=tcp://0.0.0.0:26657"} + if disableBBR { + args = append(args, "--force-no-bbr") + } + if upgradeHeightV2 != 0 { + args = append(args, fmt.Sprintf("--v2-upgrade-height=%d", 
upgradeHeightV2)) } - err = instance.SetArgs(args...) - if err != nil { + if err := knInstance.Build().SetArgs(args...); err != nil { return nil, err } return &Node{ Name: name, - Instance: instance, + Instance: knInstance, Version: version, StartHeight: startHeight, InitialPeers: peers, SignerKey: signerKey, NetworkKey: networkKey, SelfDelegation: selfDelegation, + sidecars: sidecars, }, nil } -func (n *Node) Init(genesis *types.GenesisDoc, peers []string, configOptions ...Option) error { +func (n *Node) EnableNetShaper() { + n.netShaper = netshaper.New() + n.sidecars = append(n.sidecars, n.netShaper) +} + +func (n *Node) SetLatencyAndJitter(latency, jitter int64) error { + if n.netShaper == nil { + return fmt.Errorf("netshaper is not enabled") + } + return n.netShaper.SetLatencyAndJitter(latency, jitter) +} + +func (n *Node) Init(ctx context.Context, genesis *types.GenesisDoc, peers []string, configOptions ...Option) error { if len(peers) == 0 { return fmt.Errorf("no peers provided") } @@ -190,7 +214,7 @@ func (n *Node) Init(genesis *types.GenesisDoc, peers []string, configOptions ... } // Create and write the config file - cfg, err := MakeConfig(n, configOptions...) + cfg, err := MakeConfig(ctx, n, configOptions...) if err != nil { return fmt.Errorf("making config: %w", err) } @@ -235,12 +259,17 @@ func (n *Node) Init(genesis *types.GenesisDoc, peers []string, configOptions ... return fmt.Errorf("writing address book: %w", err) } - err = n.Instance.Commit() - if err != nil { + if err := n.Instance.Build().Commit(ctx); err != nil { return fmt.Errorf("committing instance: %w", err) } - if err = n.Instance.AddFolder(nodeDir, remoteRootDir, "10001:10001"); err != nil { + for _, sc := range n.sidecars { + if err := n.Instance.Sidecars().Add(ctx, sc); err != nil { + return fmt.Errorf("adding sidecar: %w", err) + } + } + + if err = n.Instance.Storage().AddFolder(nodeDir, remoteRootDir, "10001:10001"); err != nil { return fmt.Errorf("copying over node %s directory: %w", n.Name, err) } return nil @@ -249,8 +278,8 @@ func (n *Node) Init(genesis *types.GenesisDoc, peers []string, configOptions ... // AddressP2P returns a P2P endpoint address for the node. This is used for // populating the address book. This will look something like: // 3314051954fc072a0678ec0cbac690ad8676ab98@61.108.66.220:26656 -func (n Node) AddressP2P(withID bool) string { - ip, err := n.Instance.GetIP() +func (n Node) AddressP2P(ctx context.Context, withID bool) string { + ip, err := n.Instance.Network().GetIP(ctx) if err != nil { panic(err) } @@ -275,8 +304,8 @@ func (n Node) AddressRPC() string { // } // RemoteAddressGRPC retrieves the gRPC endpoint address of a node within the cluster. -func (n Node) RemoteAddressGRPC() (string, error) { - ip, err := n.Instance.GetIP() +func (n Node) RemoteAddressGRPC(ctx context.Context) (string, error) { + ip, err := n.Instance.Network().GetIP(ctx) if err != nil { return "", err } @@ -284,8 +313,8 @@ func (n Node) RemoteAddressGRPC() (string, error) { } // RemoteAddressRPC retrieves the RPC endpoint address of a node within the cluster. 
-func (n Node) RemoteAddressRPC() (string, error) { - ip, err := n.Instance.GetIP() +func (n Node) RemoteAddressRPC(ctx context.Context) (string, error) { + ip, err := n.Instance.Network().GetIP(ctx) if err != nil { return "", err } @@ -296,8 +325,8 @@ func (n Node) AddressTracing() string { return n.traceProxyHost } -func (n Node) RemoteAddressTracing() (string, error) { - ip, err := n.Instance.GetIP() +func (n Node) RemoteAddressTracing(ctx context.Context) (string, error) { + ip, err := n.Instance.Network().GetIP(ctx) if err != nil { return "", err } @@ -313,24 +342,25 @@ func (n Node) Client() (*http.HTTP, error) { return http.New(n.AddressRPC(), "/websocket") } -func (n *Node) Start() error { - if err := n.StartAsync(); err != nil { +func (n *Node) Start(ctx context.Context) error { + if err := n.StartAsync(ctx); err != nil { return err } - return n.WaitUntilStartedAndForwardPorts() + return n.WaitUntilStartedAndCreateProxy(ctx) } -func (n *Node) StartAsync() error { - return n.Instance.StartAsync() +func (n *Node) StartAsync(ctx context.Context) error { + return n.Instance.Execution().StartAsync(ctx) } -func (n *Node) WaitUntilStartedAndForwardPorts() error { - if err := n.Instance.WaitInstanceIsRunning(); err != nil { +func (n *Node) WaitUntilStartedAndCreateProxy(ctx context.Context) error { + if err := n.Instance.Execution().WaitInstanceIsRunning(ctx); err != nil { return err } - err, rpcProxyHost := n.Instance.AddHost(rpcPort) + // TODO: It is recommended to use AddHostWithReadyCheck for the proxy + rpcProxyHost, err := n.Instance.Network().AddHost(ctx, rpcPort) if err != nil { return err } @@ -343,7 +373,8 @@ func (n *Node) WaitUntilStartedAndForwardPorts() error { // } // n.grpcProxyHost = grpcProxyHost - err, traceProxyHost := n.Instance.AddHost(tracingPort) + // TODO: It is recommended to use AddHostWithReadyCheck for the proxy + traceProxyHost, err := n.Instance.Network().AddHost(ctx, tracingPort) if err != nil { return err } @@ -364,12 +395,22 @@ func (n *Node) GenesisValidator() genesis.Validator { } } -func (n *Node) Upgrade(version string) error { - if err := n.Instance.SetImageInstant(DockerImageName(version)); err != nil { +func (n *Node) Upgrade(ctx context.Context, version string) error { + if err := n.Instance.Execution().Stop(ctx); err != nil { + return err + } + + if err := n.Instance.Execution().SetImage(ctx, DockerImageName(version)); err != nil { return err } - return n.Instance.WaitInstanceIsRunning() + // New set of args can be set here + // Or/and the start command can also be set here + + if err := n.Instance.Build().Commit(ctx); err != nil { + return err + } + return n.Instance.Execution().Start(ctx) } func DockerImageName(version string) string { diff --git a/test/e2e/testnet/setup.go b/test/e2e/testnet/setup.go index 620b46cb66..d7db215c34 100644 --- a/test/e2e/testnet/setup.go +++ b/test/e2e/testnet/setup.go @@ -1,6 +1,7 @@ package testnet import ( + "context" "fmt" "strings" "time" @@ -12,14 +13,14 @@ import ( "github.com/tendermint/tendermint/p2p/pex" ) -func MakeConfig(node *Node, opts ...Option) (*config.Config, error) { +func MakeConfig(ctx context.Context, node *Node, opts ...Option) (*config.Config, error) { cfg := app.DefaultConsensusConfig() cfg.TxIndex.Indexer = "kv" cfg.Consensus.TimeoutPropose = config.DefaultConsensusConfig().TimeoutPropose cfg.Consensus.TimeoutCommit = config.DefaultConsensusConfig().TimeoutCommit cfg.Moniker = node.Name cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" - cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", 
node.AddressP2P(false)) + cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(ctx, false)) cfg.P2P.PersistentPeers = strings.Join(node.InitialPeers, ",") cfg.Instrumentation.Prometheus = true diff --git a/test/e2e/testnet/testnet.go b/test/e2e/testnet/testnet.go index 300777b74b..8cdba539b0 100644 --- a/test/e2e/testnet/testnet.go +++ b/test/e2e/testnet/testnet.go @@ -3,6 +3,7 @@ package testnet import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -12,12 +13,18 @@ import ( "github.com/celestiaorg/celestia-app/v3/app/encoding" "github.com/celestiaorg/celestia-app/v3/test/util/genesis" "github.com/celestiaorg/knuu/pkg/knuu" + "github.com/celestiaorg/knuu/pkg/preloader" "github.com/cosmos/cosmos-sdk/crypto/hd" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/rs/zerolog/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) +const ( + DefaultSeed int64 = 42 + DefaultChainID = "test-chain-id" +) + type Testnet struct { seed int64 nodes []*Node @@ -25,28 +32,35 @@ type Testnet struct { keygen *keyGenerator grafana *GrafanaInfo txClients []*TxSim + knuu *knuu.Knuu } -func New(name string, seed int64, grafana *GrafanaInfo, chainID string, - genesisModifiers ...genesis.Modifier) ( - *Testnet, error, -) { - identifier := fmt.Sprintf("%s_%s", name, time.Now().Format("20060102_150405")) - if err := knuu.InitializeWithScope(identifier); err != nil { - return nil, err - } +type Options struct { + Seed int64 + Grafana *GrafanaInfo + ChainID string + GenesisModifiers []genesis.Modifier +} +func New(knuu *knuu.Knuu, opts Options) (*Testnet, error) { + opts.setDefaults() return &Testnet{ - seed: seed, + seed: opts.Seed, nodes: make([]*Node, 0), - genesis: genesis.NewDefaultGenesis().WithChainID(identifier).WithModifiers(genesisModifiers...), - keygen: newKeyGenerator(seed), - grafana: grafana, + genesis: genesis.NewDefaultGenesis().WithChainID(opts.ChainID).WithModifiers(opts.GenesisModifiers...), + keygen: newKeyGenerator(opts.Seed), + grafana: opts.Grafana, + knuu: knuu, }, nil } -func (t *Testnet) ChainID() string { - return t.genesis.ChainID +func (t *Testnet) NewPreloader() (*preloader.Preloader, error) { + if t.knuu == nil { + return nil, errors.New("knuu is not initialized") + } + // Since there is one dedicated knuu object for the testnet, each one has its own namespace, and + // there is one preloader per testnet, can use the same preloader name for all nodes + return preloader.New("preloader", t.knuu.SystemDependencies) } func (t *Testnet) SetConsensusParams(params *tmproto.ConsensusParams) { @@ -57,12 +71,10 @@ func (t *Testnet) SetConsensusMaxBlockSize(size int64) { t.genesis.ConsensusParams.Block.MaxBytes = size } -func (t *Testnet) CreateGenesisNode(version string, selfDelegation, upgradeHeight int64, resources Resources) error { +func (t *Testnet) CreateGenesisNode(ctx context.Context, version string, selfDelegation, upgradeHeightV2 int64, resources Resources, disableBBR bool) error { signerKey := t.keygen.Generate(ed25519Type) networkKey := t.keygen.Generate(ed25519Type) - node, err := NewNode(fmt.Sprintf("val%d", len(t.nodes)), version, 0, - selfDelegation, nil, signerKey, networkKey, upgradeHeight, resources, - t.grafana) + node, err := NewNode(ctx, fmt.Sprintf("val%d", len(t.nodes)), version, 0, selfDelegation, nil, signerKey, networkKey, upgradeHeightV2, resources, t.grafana, t.knuu, disableBBR) if err != nil { return err } @@ -73,26 +85,27 @@ func (t *Testnet) CreateGenesisNode(version string, selfDelegation, upgradeHeigh return nil } 
-func (t *Testnet) CreateGenesisNodes(num int, version string, selfDelegation, upgradeHeight int64, resources Resources) error { +func (t *Testnet) CreateGenesisNodes(ctx context.Context, num int, version string, selfDelegation, upgradeHeightV2 int64, resources Resources, disableBBR bool) error { for i := 0; i < num; i++ { - if err := t.CreateGenesisNode(version, selfDelegation, upgradeHeight, resources); err != nil { + if err := t.CreateGenesisNode(ctx, version, selfDelegation, upgradeHeightV2, resources, disableBBR); err != nil { return err } } return nil } -func (t *Testnet) CreateTxClients(version string, +func (t *Testnet) CreateTxClients(ctx context.Context, + version string, sequences int, blobRange string, blobPerSequence int, resources Resources, grpcEndpoints []string, + upgradeSchedule map[int64]uint64, ) error { for i, grpcEndpoint := range grpcEndpoints { name := fmt.Sprintf("txsim%d", i) - err := t.CreateTxClient(name, version, sequences, - blobRange, blobPerSequence, resources, grpcEndpoint) + err := t.CreateTxClient(ctx, name, version, sequences, blobRange, blobPerSequence, resources, grpcEndpoint, upgradeSchedule) if err != nil { log.Err(err).Str("name", name). Str("grpc endpoint", grpcEndpoint). @@ -107,46 +120,77 @@ func (t *Testnet) CreateTxClients(version string, return nil } -// CreateTxClient creates a txsim node and sets it up -// name: name of the txsim knuu instance -// version: version of the txsim docker image to be pulled from the registry -// specified by txsimDockerSrcURL -// seed: seed for the txsim -// sequences: number of sequences to be run by the txsim -// blobRange: range of blob sizes to be used by the txsim in bytes -// pollTime: time in seconds between each sequence -// resources: resources to be allocated to the txsim -// grpcEndpoint: grpc endpoint of the node to which the txsim will connect and send transactions -func (t *Testnet) CreateTxClient(name, +// CreateTxClient creates a txsim node and sets it up. +// +// Parameters: +// ctx: Context for managing the lifecycle. +// name: Name of the txsim knuu instance. +// version: Version of the txsim Docker image to pull. +// blobSequences: Number of blob sequences to run by the txsim. +// blobRange: Range of blob sizes in bytes used by the txsim. +// blobPerSequence: Number of blobs per sequence. +// resources: Resources allocated to the txsim. +// grpcEndpoint: gRPC endpoint of the node for transaction submission. +// upgradeSchedule: Map from height to version for scheduled upgrades (v3 and onwards). +func (t *Testnet) CreateTxClient( + ctx context.Context, + name string, version string, - sequences int, + blobSequences int, blobRange string, blobPerSequence int, resources Resources, grpcEndpoint string, + upgradeSchedule map[int64]uint64, ) error { - // create an account, and store it in a temp directory and add the account as genesis account to - // the testnet txsimKeyringDir := filepath.Join(os.TempDir(), name) - log.Info(). - Str("name", name). - Str("directory", txsimKeyringDir). 
- Msg("txsim keyring directory created") - _, err := t.CreateAccount(name, 1e16, txsimKeyringDir) + defer os.RemoveAll(txsimKeyringDir) + + config := encoding.MakeConfig(app.ModuleEncodingRegisters...).Codec + txsimKeyring, err := keyring.New(app.Name, keyring.BackendTest, txsimKeyringDir, nil, config) if err != nil { - return err + return fmt.Errorf("failed to create keyring: %w", err) + } + + key, _, err := txsimKeyring.NewMnemonic(name, keyring.English, "", "", hd.Secp256k1) + if err != nil { + return fmt.Errorf("failed to create mnemonic: %w", err) + } + pk, err := key.GetPubKey() + if err != nil { + return fmt.Errorf("failed to get public key: %w", err) + } + err = t.genesis.AddAccount(genesis.Account{ + PubKey: pk, + Balance: 1e16, + Name: name, + }) + if err != nil { + return fmt.Errorf("failed to add account to genesis: %w", err) + } + + // Copy the keys from the genesis keyring to the txsim keyring so that txsim + // can submit MsgSignalVersion on behalf of the validators. + for _, node := range t.Nodes() { + armor, err := t.Genesis().Keyring().ExportPrivKeyArmor(node.Name, "") + if err != nil { + return fmt.Errorf("failed to export key: %w", err) + } + err = txsimKeyring.ImportPrivKey(node.Name, armor, "") + if err != nil { + return fmt.Errorf("failed to import key: %w", err) + } } - // Create a txsim node using the key stored in the txsimKeyringDir - txsim, err := CreateTxClient(name, version, grpcEndpoint, t.seed, - sequences, blobRange, blobPerSequence, 1, resources, txsimRootDir) + txsim, err := CreateTxClient(ctx, name, version, grpcEndpoint, t.seed, blobSequences, blobRange, blobPerSequence, 1, resources, txsimKeyringDir, t.knuu, upgradeSchedule) if err != nil { log.Err(err). Str("name", name). Msg("error creating txsim") return err } - err = txsim.Instance.Commit() + + err = txsim.Instance.Build().Commit(ctx) if err != nil { log.Err(err). Str("name", name). @@ -155,7 +199,7 @@ func (t *Testnet) CreateTxClient(name, } // copy over the keyring directory to the txsim instance - err = txsim.Instance.AddFolder(txsimKeyringDir, txsimRootDir, "10001:10001") + err = txsim.Instance.Storage().AddFolder(txsimKeyringDir, txsimKeyringDir, "10001:10001") if err != nil { log.Err(err). Str("directory", txsimKeyringDir). @@ -168,9 +212,9 @@ func (t *Testnet) CreateTxClient(name, return nil } -func (t *Testnet) StartTxClients() error { +func (t *Testnet) StartTxClients(ctx context.Context) error { for _, txsim := range t.txClients { - err := txsim.Instance.StartWithoutWait() + err := txsim.Instance.Execution().StartAsync(ctx) if err != nil { log.Err(err). Str("name", txsim.Name). @@ -183,7 +227,7 @@ func (t *Testnet) StartTxClients() error { } // wait for txsims to start for _, txsim := range t.txClients { - err := txsim.Instance.WaitInstanceIsRunning() + err := txsim.Instance.Execution().WaitInstanceIsRunning(ctx) if err != nil { return fmt.Errorf("txsim %s failed to run: %w", txsim.Name, err) } @@ -192,16 +236,6 @@ func (t *Testnet) StartTxClients() error { return nil } -func (t *Testnet) StopTxClients() error { - for _, txsim := range t.txClients { - err := txsim.Instance.Stop() - if err != nil { - return err - } - } - return nil -} - // CreateAccount creates an account and adds it to the // testnet genesis. The account is created with the given name and tokens and // is persisted in the given txsimKeyringDir. 
@@ -214,8 +248,7 @@ func (t *Testnet) CreateAccount(name string, tokens int64, txsimKeyringDir strin if txsimKeyringDir == "" { kr = keyring.NewInMemory(cdc) } else { // create a keyring with the specified directory - kr, err = keyring.New(app.Name, keyring.BackendTest, - txsimKeyringDir, nil, cdc) + kr, err = keyring.New(app.Name, keyring.BackendTest, txsimKeyringDir, nil, cdc) if err != nil { return nil, err } @@ -245,12 +278,10 @@ func (t *Testnet) CreateAccount(name string, tokens int64, txsimKeyringDir strin return kr, nil } -func (t *Testnet) CreateNode(version string, startHeight, upgradeHeight int64, resources Resources) error { +func (t *Testnet) CreateNode(ctx context.Context, version string, startHeight, upgradeHeight int64, resources Resources, disableBBR bool) error { signerKey := t.keygen.Generate(ed25519Type) networkKey := t.keygen.Generate(ed25519Type) - node, err := NewNode(fmt.Sprintf("val%d", len(t.nodes)), version, - startHeight, 0, nil, signerKey, networkKey, upgradeHeight, resources, - t.grafana) + node, err := NewNode(ctx, fmt.Sprintf("val%d", len(t.nodes)), version, startHeight, 0, nil, signerKey, networkKey, upgradeHeight, resources, t.grafana, t.knuu, disableBBR) if err != nil { return err } @@ -258,7 +289,7 @@ func (t *Testnet) CreateNode(version string, startHeight, upgradeHeight int64, r return nil } -func (t *Testnet) Setup(configOpts ...Option) error { +func (t *Testnet) Setup(ctx context.Context, configOpts ...Option) error { genesis, err := t.genesis.Export() if err != nil { return err @@ -270,11 +301,11 @@ func (t *Testnet) Setup(configOpts ...Option) error { peers := make([]string, 0, len(t.nodes)-1) for _, peer := range t.nodes { if peer.Name != node.Name { - peers = append(peers, peer.AddressP2P(true)) + peers = append(peers, peer.AddressP2P(ctx, true)) } } - err := node.Init(genesis, peers, configOpts...) + err := node.Init(ctx, genesis, peers, configOpts...) if err != nil { return err } @@ -302,10 +333,10 @@ func (t *Testnet) RPCEndpoints() []string { // RemoteGRPCEndpoints retrieves the gRPC endpoint addresses of the // testnet's validator nodes. -func (t *Testnet) RemoteGRPCEndpoints() ([]string, error) { +func (t *Testnet) RemoteGRPCEndpoints(ctx context.Context) ([]string, error) { grpcEndpoints := make([]string, len(t.nodes)) for idx, node := range t.nodes { - grpcEP, err := node.RemoteAddressGRPC() + grpcEP, err := node.RemoteAddressGRPC(ctx) if err != nil { return nil, err } @@ -324,10 +355,10 @@ func (t *Testnet) GetGenesisValidators() []genesis.Validator { // RemoteRPCEndpoints retrieves the RPC endpoint addresses of the testnet's // validator nodes. -func (t *Testnet) RemoteRPCEndpoints() ([]string, error) { +func (t *Testnet) RemoteRPCEndpoints(ctx context.Context) ([]string, error) { rpcEndpoints := make([]string, len(t.nodes)) for idx, node := range t.nodes { - grpcEP, err := node.RemoteAddressRPC() + grpcEP, err := node.RemoteAddressRPC(ctx) if err != nil { return nil, err } @@ -338,13 +369,14 @@ func (t *Testnet) RemoteRPCEndpoints() ([]string, error) { // WaitToSync waits for the started nodes to sync with the network and move // past the genesis block. 
-func (t *Testnet) WaitToSync() error { +func (t *Testnet) WaitToSync(ctx context.Context) error { genesisNodes := make([]*Node, 0) for _, node := range t.nodes { if node.StartHeight == 0 { genesisNodes = append(genesisNodes, node) } } + for _, node := range genesisNodes { log.Info().Str("name", node.Name).Msg( "waiting for node to sync") @@ -353,7 +385,7 @@ func (t *Testnet) WaitToSync() error { return fmt.Errorf("failed to initialize client for node %s: %w", node.Name, err) } for i := 0; i < 10; i++ { - resp, err := client.Status(context.Background()) + resp, err := client.Status(ctx) if err == nil { if resp.SyncInfo.LatestBlockHeight > 0 { log.Info().Int("attempts", i).Str("name", node.Name).Msg( @@ -375,68 +407,57 @@ func (t *Testnet) WaitToSync() error { return nil } -// StartNodes starts the testnet nodes and forwards the ports. +// StartNodes starts the testnet nodes and sets up proxies. // It does not wait for the nodes to produce blocks. // For that, use WaitToSync. -func (t *Testnet) StartNodes() error { +func (t *Testnet) StartNodes(ctx context.Context) error { genesisNodes := make([]*Node, 0) + // identify genesis nodes for _, node := range t.nodes { if node.StartHeight == 0 { genesisNodes = append(genesisNodes, node) } - } - // start genesis nodes asynchronously - for _, node := range genesisNodes { - err := node.StartAsync() + + err := node.StartAsync(ctx) if err != nil { return fmt.Errorf("node %s failed to start: %w", node.Name, err) } } - log.Info().Msg("forwarding ports for genesis nodes") + + log.Info().Msg("create endpoint proxies for genesis nodes") // wait for instances to be running for _, node := range genesisNodes { - err := node.WaitUntilStartedAndForwardPorts() + err := node.WaitUntilStartedAndCreateProxy(ctx) if err != nil { + log.Err(err).Str("name", node.Name).Str("version", + node.Version).Msg("failed to start and forward ports") return fmt.Errorf("node %s failed to start: %w", node.Name, err) } + log.Info().Str("name", node.Name).Str("version", + node.Version).Msg("started and ports forwarded") } return nil } -func (t *Testnet) Start() error { - // start nodes and forward ports - err := t.StartNodes() +func (t *Testnet) Start(ctx context.Context) error { + // start nodes and set up proxies + err := t.StartNodes(ctx) if err != nil { return err } // wait for nodes to sync log.Info().Msg("waiting for genesis nodes to sync") - err = t.WaitToSync() + err = t.WaitToSync(ctx) if err != nil { return err } - return t.StartTxClients() + return t.StartTxClients(ctx) } -func (t *Testnet) Cleanup() { - // cleanup txsim - for _, txsim := range t.txClients { - err := txsim.Instance.Destroy() - if err != nil { - log.Err(err). - Str("name", txsim.Name). - Msg("txsim failed to cleanup") - } - } - // cleanup nodes - for _, node := range t.nodes { - err := node.Instance.Destroy() - if err != nil { - log.Err(err). - Str("name", node.Name). 
- Msg("node failed to cleanup") - } +func (t *Testnet) Cleanup(ctx context.Context) { + if err := t.knuu.CleanUp(ctx); err != nil { + log.Err(err).Msg("failed to cleanup knuu") } } @@ -447,3 +468,16 @@ func (t *Testnet) Node(i int) *Node { func (t *Testnet) Nodes() []*Node { return t.nodes } + +func (t *Testnet) Genesis() *genesis.Genesis { + return t.genesis +} + +func (o *Options) setDefaults() { + if o.ChainID == "" { + o.ChainID = DefaultChainID + } + if o.Seed == 0 { + o.Seed = DefaultSeed + } +} diff --git a/test/e2e/testnet/txsimNode.go b/test/e2e/testnet/txsimNode.go index 994d87a831..c63062598b 100644 --- a/test/e2e/testnet/txsimNode.go +++ b/test/e2e/testnet/txsimNode.go @@ -2,8 +2,12 @@ package testnet import ( + "context" "fmt" + "strings" + "github.com/celestiaorg/go-square/v2/share" + "github.com/celestiaorg/knuu/pkg/instance" "github.com/celestiaorg/knuu/pkg/knuu" "github.com/rs/zerolog/log" ) @@ -18,19 +22,24 @@ func txsimDockerImageName(version string) string { type TxSim struct { Name string - Instance *knuu.Instance + Instance *instance.Instance } +// CreateTxClient returns a new TxSim instance. func CreateTxClient( - name, version string, + ctx context.Context, + name string, + version string, endpoint string, seed int64, - sequences int, + blobSequences int, blobRange string, blobsPerSeq int, pollTime int, resources Resources, volumePath string, + knuu *knuu.Knuu, + upgradeSchedule map[int64]uint64, ) (*TxSim, error) { instance, err := knuu.NewInstance(name) if err != nil { @@ -41,7 +50,7 @@ func CreateTxClient( Str("name", name). Str("image", image). Msg("setting image for tx client") - err = instance.SetImage(image) + err = instance.Build().SetImage(ctx, image) if err != nil { log.Err(err). Str("name", name). @@ -49,35 +58,56 @@ func CreateTxClient( Msg("failed to set image for tx client") return nil, err } - err = instance.SetMemory(resources.MemoryRequest, resources.MemoryLimit) + err = instance.Resources().SetMemory(resources.MemoryRequest, resources.MemoryLimit) if err != nil { return nil, err } - err = instance.SetCPU(resources.CPU) + err = instance.Resources().SetCPU(resources.CPU) if err != nil { return nil, err } - err = instance.AddVolumeWithOwner(volumePath, resources.Volume, 10001) + err = instance.Storage().AddVolumeWithOwner(volumePath, resources.Volume, 10001) if err != nil { return nil, err } args := []string{ - fmt.Sprintf("-k %d", 0), - fmt.Sprintf("-g %s", endpoint), - fmt.Sprintf("-t %ds", pollTime), - fmt.Sprintf("-b %d ", sequences), - fmt.Sprintf("-d %d ", seed), - fmt.Sprintf("-a %d ", blobsPerSeq), - fmt.Sprintf("-s %s ", blobRange), + fmt.Sprintf("--key-path %s", volumePath), + fmt.Sprintf("--grpc-endpoint %s", endpoint), + fmt.Sprintf("--poll-time %ds", pollTime), + fmt.Sprintf("--seed %d", seed), + fmt.Sprintf("--blob %d", blobSequences), + fmt.Sprintf("--blob-amounts %d", blobsPerSeq), + fmt.Sprintf("--blob-sizes %s", blobRange), + fmt.Sprintf("--blob-share-version %d", share.ShareVersionZero), } - err = instance.SetArgs(args...) - if err != nil { + if len(upgradeSchedule) > 0 { + args = append(args, fmt.Sprintf("--upgrade-schedule %s", stringifyUpgradeSchedule(upgradeSchedule))) + } + + if err := instance.Build().SetArgs(args...); err != nil { return nil, err } + log.Info(). + Str("name", name). + Str("image", image). + Str("args", strings.Join(args, " ")). 
+ Msg("created tx client") + return &TxSim{ Name: name, Instance: instance, }, nil } + +func stringifyUpgradeSchedule(schedule map[int64]uint64) string { + if schedule == nil { + return "" + } + scheduleParts := make([]string, 0, len(schedule)) + for height, version := range schedule { + scheduleParts = append(scheduleParts, fmt.Sprintf("%d:%d", height, version)) + } + return strings.Join(scheduleParts, ",") +} diff --git a/test/e2e/testnet/util.go b/test/e2e/testnet/util.go index 0d89494858..edf89aceda 100644 --- a/test/e2e/testnet/util.go +++ b/test/e2e/testnet/util.go @@ -1,43 +1,11 @@ package testnet import ( - "io" - "math/rand" "os" "github.com/rs/zerolog/log" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" ) -type keyGenerator struct { - random *rand.Rand -} - -func newKeyGenerator(seed int64) *keyGenerator { - return &keyGenerator{ - random: rand.New(rand.NewSource(seed)), //nolint:gosec - } -} - -func (g *keyGenerator) Generate(keyType string) crypto.PrivKey { - seed := make([]byte, ed25519.SeedSize) - - _, err := io.ReadFull(g.random, seed) - if err != nil { - panic(err) // this shouldn't happen - } - switch keyType { - case "secp256k1": - return secp256k1.GenPrivKeySecp256k1(seed) - case "", "ed25519": - return ed25519.GenPrivKeyFromSecret(seed) - default: - panic("KeyType not supported") // should not make it this far - } -} - type GrafanaInfo struct { Endpoint string Username string diff --git a/test/interchain/go.mod b/test/interchain/go.mod index bbcd6956b1..2b6b52b585 100644 --- a/test/interchain/go.mod +++ b/test/interchain/go.mod @@ -1,6 +1,6 @@ module github.com/celestiaorg/celestia-app/test/interchain -go 1.22.6 +go 1.23.1 require ( cosmossdk.io/math v1.3.0 diff --git a/test/txsim/run_test.go b/test/txsim/run_test.go index 46e4523710..d4cbdf22ba 100644 --- a/test/txsim/run_test.go +++ b/test/txsim/run_test.go @@ -12,12 +12,17 @@ import ( "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" "github.com/celestiaorg/celestia-app/v3/test/txsim" "github.com/celestiaorg/celestia-app/v3/test/util/testnode" "github.com/cosmos/cosmos-sdk/crypto/keyring" sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" blob "github.com/celestiaorg/celestia-app/v3/x/blob/types" + signaltypes "github.com/celestiaorg/celestia-app/v3/x/signal/types" bank "github.com/cosmos/cosmos-sdk/x/bank/types" distribution "github.com/cosmos/cosmos-sdk/x/distribution/types" staking "github.com/cosmos/cosmos-sdk/x/staking/types" @@ -152,3 +157,52 @@ func Setup(t testing.TB) (keyring.Keyring, string, string) { return cctx.Keyring, rpcAddr, grpcAddr } + +func TestTxSimUpgrade(t *testing.T) { + if testing.Short() { + t.Skip("skipping TestTxSimUpgrade in short mode.") + } + cp := app.DefaultConsensusParams() + cp.Version.AppVersion = v2.Version + cfg := testnode.DefaultConfig(). + WithTimeoutCommit(300 * time.Millisecond). + WithConsensusParams(cp). + WithFundedAccounts("txsim-master") + cctx, _, grpcAddr := testnode.NewNetwork(t, cfg) + + require.NoError(t, cctx.WaitForNextBlock()) + + // updrade to v3 at height 20 + sequences := []txsim.Sequence{ + txsim.NewUpgradeSequence(v3.Version, 20), + } + + opts := txsim.DefaultOptions(). + // SuppressLogs(). 
+ WithPollTime(time.Millisecond * 100) + + err := txsim.Run( + cctx.GoContext(), + grpcAddr, + cctx.Keyring, + encoding.MakeConfig(app.ModuleEncodingRegisters...), + opts, + sequences..., + ) + require.NoError(t, err) + + conn, err := grpc.NewClient(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + defer conn.Close() + + querier := signaltypes.NewQueryClient(conn) + + // We can't check that the upgrade was successful because the upgrade height is thousands of blocks away + // and even at 300 millisecond block times, it would take too long. Instead we just want to assert + // that the upgrade is ready to be performed + require.Eventually(t, func() bool { + upgradePlan, err := querier.GetUpgrade(cctx.GoContext(), &signaltypes.QueryGetUpgradeRequest{}) + require.NoError(t, err) + return upgradePlan.Upgrade != nil && upgradePlan.Upgrade.AppVersion == v3.Version + }, time.Second*20, time.Millisecond*100) +} diff --git a/test/txsim/upgrade.go b/test/txsim/upgrade.go new file mode 100644 index 0000000000..e2015b98d1 --- /dev/null +++ b/test/txsim/upgrade.go @@ -0,0 +1,90 @@ +package txsim + +import ( + "context" + "errors" + "math/rand" + + signaltypes "github.com/celestiaorg/celestia-app/v3/x/signal/types" + "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/gogo/protobuf/grpc" +) + +var _ Sequence = &UpgradeSequence{} + +const fundsForUpgrade = 100_000 + +// UpgradeSequence simulates a sequence of validators submitting +// MsgSignalVersions for a particular version and then eventually a +// MsgTryUpgrade. +type UpgradeSequence struct { + // signalled is a map from validator address to a boolean indicating if they have signalled. + signalled map[string]bool + // height is the first height at which the upgrade sequence is run. + height int64 + // version is the version that validators are signalling for. + version uint64 + // account is the address of the account that submits the MsgTryUpgrade. + account types.AccAddress + // hasUpgraded is true if the MsgTryUpgrade has been submitted. + hasUpgraded bool +} + +func NewUpgradeSequence(version uint64, height int64) *UpgradeSequence { + return &UpgradeSequence{version: version, height: height, signalled: make(map[string]bool)} +} + +func (s *UpgradeSequence) Clone(_ int) []Sequence { + panic("cloning not supported for upgrade sequence. 
Only a single sequence is needed") +} + +// this is a no-op for the upgrade sequence +func (s *UpgradeSequence) Init(_ context.Context, _ grpc.ClientConn, allocateAccounts AccountAllocator, _ *rand.Rand, _ bool) { + s.account = allocateAccounts(1, fundsForUpgrade)[0] +} + +func (s *UpgradeSequence) Next(ctx context.Context, querier grpc.ClientConn, _ *rand.Rand) (Operation, error) { + if s.hasUpgraded { + return Operation{}, ErrEndOfSequence + } + + stakingQuerier := stakingtypes.NewQueryClient(querier) + validatorsResp, err := stakingQuerier.Validators(ctx, &stakingtypes.QueryValidatorsRequest{}) + if err != nil { + return Operation{}, err + } + + if len(validatorsResp.Validators) == 0 { + return Operation{}, errors.New("no validators found") + } + + delay := uint64(0) + // apply a delay to the first signal only + if len(s.signalled) == 0 { + delay = uint64(s.height) + } + + // Choose a random validator to be the authority + for _, validator := range validatorsResp.Validators { + if !s.signalled[validator.OperatorAddress] { + s.signalled[validator.OperatorAddress] = true + msg := &signaltypes.MsgSignalVersion{ + ValidatorAddress: validator.OperatorAddress, + Version: s.version, + } + return Operation{ + Msgs: []types.Msg{msg}, + Delay: delay, + }, nil + } + } + + // if all validators have voted, we can now try to upgrade. + s.hasUpgraded = true + msg := signaltypes.NewMsgTryUpgrade(s.account) + return Operation{ + Msgs: []types.Msg{msg}, + Delay: delay, + }, nil +} diff --git a/test/util/blobfactory/payforblob_factory.go b/test/util/blobfactory/payforblob_factory.go index 1a4aa6d10c..f9b05359bc 100644 --- a/test/util/blobfactory/payforblob_factory.go +++ b/test/util/blobfactory/payforblob_factory.go @@ -245,10 +245,10 @@ func ManyMultiBlobTx( accounts []string, accInfos []AccountInfo, blobs [][]*share.Blob, + opts ...user.TxOption, ) [][]byte { t.Helper() txs := make([][]byte, len(accounts)) - opts := DefaultTxOpts() for i, acc := range accounts { signer, err := user.NewSigner(kr, enc, chainid, appconsts.LatestVersion, user.NewAccount(acc, accInfos[i].AccountNum, accInfos[i].Sequence)) require.NoError(t, err) diff --git a/test/util/genesis/document.go b/test/util/genesis/document.go index 3cc61f08b9..f45921431b 100644 --- a/test/util/genesis/document.go +++ b/test/util/genesis/document.go @@ -24,6 +24,7 @@ func Document( chainID string, gentxs []json.RawMessage, accounts []Account, + genesisTime time.Time, mods ...Modifier, ) (*coretypes.GenesisDoc, error) { genutilGenState := genutiltypes.DefaultGenesisState() @@ -73,7 +74,7 @@ func Document( // Create the genesis doc genesisDoc := &coretypes.GenesisDoc{ ChainID: chainID, - GenesisTime: time.Now(), + GenesisTime: genesisTime, ConsensusParams: params, AppState: stateBz, } @@ -101,7 +102,6 @@ func accountsToSDKTypes(accounts []Account) ([]banktypes.Balance, []authtypes.Ge ) genBals[i] = banktypes.Balance{Address: addr.String(), Coins: balances.Sort()} - genAccs[i] = authtypes.NewBaseAccount(addr, account.PubKey, uint64(i), 0) } return genBals, genAccs, nil diff --git a/test/util/genesis/files.go b/test/util/genesis/files.go index ff702d3c15..6d2452187b 100644 --- a/test/util/genesis/files.go +++ b/test/util/genesis/files.go @@ -5,6 +5,7 @@ import ( "os" "path/filepath" + srvconfig "github.com/cosmos/cosmos-sdk/server/config" "github.com/tendermint/tendermint/config" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/p2p" @@ -17,53 +18,58 @@ import ( func InitFiles( rootDir string, tmConfig *config.Config, + 
appCfg *srvconfig.Config, genesis *Genesis, validatorIndex int, -) (basePath string, err error) { +) error { val, has := genesis.Validator(validatorIndex) if !has { - return "", fmt.Errorf("validator %d not found", validatorIndex) + return fmt.Errorf("validator %d not found", validatorIndex) } - basePath = filepath.Join(rootDir, ".celestia-app") - tmConfig.SetRoot(basePath) + tmConfig.SetRoot(rootDir) // save the genesis file - configPath := filepath.Join(basePath, "config") - err = os.MkdirAll(configPath, os.ModePerm) + configPath := filepath.Join(rootDir, "config") + err := os.MkdirAll(configPath, os.ModePerm) if err != nil { - return "", err + return err } genesisDoc, err := genesis.Export() if err != nil { - return "", fmt.Errorf("exporting genesis: %w", err) + return fmt.Errorf("exporting genesis: %w", err) } err = genesisDoc.SaveAs(tmConfig.GenesisFile()) if err != nil { - return "", err + return err } pvStateFile := tmConfig.PrivValidatorStateFile() if err := tmos.EnsureDir(filepath.Dir(pvStateFile), 0o777); err != nil { - return "", err + return err } pvKeyFile := tmConfig.PrivValidatorKeyFile() if err := tmos.EnsureDir(filepath.Dir(pvKeyFile), 0o777); err != nil { - return "", err + return err } filePV := privval.NewFilePV(val.ConsensusKey, pvKeyFile, pvStateFile) filePV.Save() nodeKeyFile := tmConfig.NodeKeyFile() if err := tmos.EnsureDir(filepath.Dir(nodeKeyFile), 0o777); err != nil { - return "", err + return err } nodeKey := &p2p.NodeKey{ PrivKey: val.NetworkKey, } if err := nodeKey.SaveAs(nodeKeyFile); err != nil { - return "", err + return err } - return basePath, nil + appConfigFilePath := filepath.Join(rootDir, "config", "app.toml") + srvconfig.WriteConfigFile(appConfigFilePath, appCfg) + + config.WriteConfigFile(filepath.Join(rootDir, "config", "config.toml"), tmConfig) + + return nil } diff --git a/test/util/genesis/genesis.go b/test/util/genesis/genesis.go index 5a52fdbb29..f0fadf0b59 100644 --- a/test/util/genesis/genesis.go +++ b/test/util/genesis/genesis.go @@ -73,7 +73,7 @@ func NewDefaultGenesis() *Genesis { return g } -// WithModifier adds a genesis modifier to the genesis. +// WithModifiers adds a genesis modifier to the genesis. func (g *Genesis) WithModifiers(ops ...Modifier) *Genesis { g.genOps = append(g.genOps, ops...) return g @@ -97,7 +97,7 @@ func (g *Genesis) WithGenesisTime(genesisTime time.Time) *Genesis { return g } -// WithAccounts adds the given validators to the genesis. +// WithValidators adds the given validators to the genesis. func (g *Genesis) WithValidators(vals ...Validator) *Genesis { for _, val := range vals { err := g.NewValidator(val) @@ -120,6 +120,11 @@ func (g *Genesis) WithKeyringAccounts(accs ...KeyringAccount) *Genesis { return g } +func (g *Genesis) WithKeyring(kr keyring.Keyring) *Genesis { + g.kr = kr + return g +} + // AddAccount adds an existing account to the genesis. func (g *Genesis) AddAccount(account Account) error { if err := account.ValidateBasic(); err != nil { @@ -175,7 +180,7 @@ func (g *Genesis) AddValidator(val Validator) error { return nil } -// Creates a new validator account and adds it to the genesis. +// NewValidator creates a new validator account and adds it to the genesis. 
func (g *Genesis) NewValidator(val Validator) error { // Add the validator's genesis account if err := g.NewAccount(val.KeyringAccount); err != nil { @@ -208,6 +213,7 @@ func (g *Genesis) Export() (*coretypes.GenesisDoc, error) { g.ChainID, gentxs, g.accounts, + g.GenesisTime, g.genOps..., ) } @@ -220,3 +226,7 @@ func (g *Genesis) Validator(i int) (Validator, bool) { } return Validator{}, false } + +func (g *Genesis) EncodingConfig() encoding.Config { + return g.ecfg +} diff --git a/test/util/test_app.go b/test/util/test_app.go index 8b4a433e9c..f9f40ef91c 100644 --- a/test/util/test_app.go +++ b/test/util/test_app.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "strings" "testing" "time" @@ -43,7 +44,10 @@ import ( const ChainID = testfactory.ChainID -var GenesisTime = time.Date(2023, 1, 1, 1, 1, 1, 1, time.UTC).UTC() +var ( + GenesisTime = time.Date(2023, 1, 1, 1, 1, 1, 1, time.UTC).UTC() + TestAppLogger = log.NewTMLogger(os.Stdout) +) // Get flags every time the simulator is run func init() { @@ -63,10 +67,21 @@ func (ao EmptyAppOptions) Get(_ string) interface{} { // of the app from first genesis account. A no-op logger is set in app. func SetupTestAppWithGenesisValSet(cparams *tmproto.ConsensusParams, genAccounts ...string) (*app.App, keyring.Keyring) { testApp, valSet, kr := NewTestAppWithGenesisSet(cparams, genAccounts...) + initialiseTestApp(testApp, valSet, cparams) + return testApp, kr +} +func SetupTestAppWithGenesisValSetAndMaxSquareSize(cparams *tmproto.ConsensusParams, maxSquareSize int, genAccounts ...string) (*app.App, keyring.Keyring) { + testApp, valSet, kr := NewTestAppWithGenesisSetAndMaxSquareSize(cparams, maxSquareSize, genAccounts...) + initialiseTestApp(testApp, valSet, cparams) + return testApp, kr +} + +func initialiseTestApp(testApp *app.App, valSet *tmtypes.ValidatorSet, cparams *tmproto.ConsensusParams) { // commit genesis changes testApp.Commit() testApp.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{ + Time: time.Now(), ChainID: ChainID, Height: testApp.LastBlockHeight() + 1, AppHash: testApp.LastCommitID().Hash, @@ -76,8 +91,6 @@ func SetupTestAppWithGenesisValSet(cparams *tmproto.ConsensusParams, genAccounts App: cparams.Version.AppVersion, }, }}) - - return testApp, kr } // NewTestApp creates a new app instance with an empty memDB and a no-op logger. @@ -90,7 +103,7 @@ func NewTestApp() *app.App { encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) return app.New( - log.NewTMLogger(os.Stdout), db, nil, + TestAppLogger, db, nil, cast.ToUint(emptyOpts.Get(server.FlagInvCheckPeriod)), encCfg, 0, @@ -178,7 +191,27 @@ func SetupDeterministicGenesisState(testApp *app.App, pubKeys []cryptotypes.PubK func NewTestAppWithGenesisSet(cparams *tmproto.ConsensusParams, genAccounts ...string) (*app.App, *tmtypes.ValidatorSet, keyring.Keyring) { testApp := NewTestApp() genesisState, valSet, kr := GenesisStateWithSingleValidator(testApp, genAccounts...) + testApp = InitialiseTestAppWithGenesis(testApp, cparams, genesisState) + return testApp, valSet, kr +} + +// NewTestAppWithGenesisSetAndMaxSquareSize initializes a new app with genesis accounts and a specific max square size +// and returns the testApp, validator set and keyring. +func NewTestAppWithGenesisSetAndMaxSquareSize(cparams *tmproto.ConsensusParams, maxSquareSize int, genAccounts ...string) (*app.App, *tmtypes.ValidatorSet, keyring.Keyring) { + testApp := NewTestApp() + genesisState, valSet, kr := GenesisStateWithSingleValidator(testApp, genAccounts...) 
+ + // hacky way of changing the gov max square size without changing the consts + blobJSON := string(genesisState["blob"]) + replace := strings.Replace(blobJSON, fmt.Sprintf("%d", appconsts.DefaultGovMaxSquareSize), fmt.Sprintf("%d", maxSquareSize), 1) + genesisState["blob"] = json.RawMessage(replace) + testApp = InitialiseTestAppWithGenesis(testApp, cparams, genesisState) + return testApp, valSet, kr +} + +// InitialiseTestAppWithGenesis initializes the provided app with the provided genesis. +func InitialiseTestAppWithGenesis(testApp *app.App, cparams *tmproto.ConsensusParams, genesisState app.GenesisState) *app.App { stateBytes, err := json.MarshalIndent(genesisState, "", " ") if err != nil { panic(err) @@ -208,7 +241,7 @@ func NewTestAppWithGenesisSet(cparams *tmproto.ConsensusParams, genAccounts ...s ChainId: ChainID, }, ) - return testApp, valSet, kr + return testApp } // AddDeterministicValidatorToGenesis adds a set of five validators to the genesis. diff --git a/test/util/testnode/app_wrapper.go b/test/util/testnode/app_wrapper.go new file mode 100644 index 0000000000..e885f4c4ed --- /dev/null +++ b/test/util/testnode/app_wrapper.go @@ -0,0 +1,21 @@ +package testnode + +import ( + "time" + + "github.com/celestiaorg/celestia-app/v3/app" + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/tendermint/tendermint/abci/types" +) + +// wrapEndBlocker overrides the app's endblocker to set the timeout commit to a +// different value for testnode. +func wrapEndBlocker(app *app.App, timeoutCommit time.Duration) func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { + endBlocker := func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { + resp := app.EndBlocker(ctx, req) + resp.Timeouts.TimeoutCommit = timeoutCommit + return resp + } + + return endBlocker +} diff --git a/test/util/testnode/config.go b/test/util/testnode/config.go index 210fbf4bcb..18a0273870 100644 --- a/test/util/testnode/config.go +++ b/test/util/testnode/config.go @@ -174,7 +174,7 @@ func DefaultTendermintConfig() *tmconfig.Config { func DefaultAppCreator() srvtypes.AppCreator { return func(_ log.Logger, _ tmdb.DB, _ io.Writer, _ srvtypes.AppOptions) srvtypes.Application { encodingConfig := encoding.MakeConfig(app.ModuleEncodingRegisters...) - return app.New( + app := app.New( log.NewNopLogger(), tmdb.NewMemDB(), nil, // trace store @@ -184,13 +184,15 @@ func DefaultAppCreator() srvtypes.AppCreator { simapp.EmptyAppOptions{}, baseapp.SetMinGasPrices(fmt.Sprintf("%v%v", appconsts.DefaultMinGasPrice, app.BondDenom)), ) + app.SetEndBlocker(wrapEndBlocker(app, time.Millisecond*30)) + return app } } func CustomAppCreator(minGasPrice string) srvtypes.AppCreator { return func(_ log.Logger, _ tmdb.DB, _ io.Writer, _ srvtypes.AppOptions) srvtypes.Application { encodingConfig := encoding.MakeConfig(app.ModuleEncodingRegisters...) 
- return app.New( + app := app.New( log.NewNopLogger(), tmdb.NewMemDB(), nil, // trace store @@ -200,6 +202,8 @@ func CustomAppCreator(minGasPrice string) srvtypes.AppCreator { simapp.EmptyAppOptions{}, baseapp.SetMinGasPrices(minGasPrice), ) + app.SetEndBlocker(wrapEndBlocker(app, time.Millisecond*0)) + return app } } diff --git a/test/util/testnode/network.go b/test/util/testnode/network.go index 79a1dc7d69..eab7384c91 100644 --- a/test/util/testnode/network.go +++ b/test/util/testnode/network.go @@ -2,6 +2,7 @@ package testnode import ( "context" + "path/filepath" "testing" "github.com/celestiaorg/celestia-app/v3/test/util/genesis" @@ -19,7 +20,8 @@ func NewNetwork(t testing.TB, config *Config) (cctx Context, rpcAddr, grpcAddr s t.Helper() // initialize the genesis file and validator files for the first validator. - baseDir, err := genesis.InitFiles(t.TempDir(), config.TmConfig, config.Genesis, 0) + baseDir := filepath.Join(t.TempDir(), "testnode") + err := genesis.InitFiles(baseDir, config.TmConfig, config.AppConfig, config.Genesis, 0) require.NoError(t, err) tmNode, app, err := NewCometNode(baseDir, &config.UniversalTestingConfig) @@ -31,6 +33,7 @@ func NewNetwork(t testing.TB, config *Config) (cctx Context, rpcAddr, grpcAddr s }) cctx = NewContext(ctx, config.Genesis.Keyring(), config.TmConfig, config.Genesis.ChainID, config.AppConfig.API.Address) + cctx.tmNode = tmNode cctx, stopNode, err := StartNode(tmNode, cctx) require.NoError(t, err) diff --git a/test/util/testnode/node_interaction_api.go b/test/util/testnode/node_interaction_api.go index 162840c819..a5c175e86d 100644 --- a/test/util/testnode/node_interaction_api.go +++ b/test/util/testnode/node_interaction_api.go @@ -22,6 +22,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmconfig "github.com/tendermint/tendermint/config" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/node" rpctypes "github.com/tendermint/tendermint/rpc/core/types" ) @@ -33,6 +34,7 @@ type Context struct { goContext context.Context client.Context apiAddress string + tmNode *node.Node } func NewContext(goContext context.Context, keyring keyring.Keyring, tmConfig *tmconfig.Config, chainID, apiAddress string) Context { diff --git a/tools/blocketa/README.md b/tools/blocketa/README.md index c4968e2c22..f7de7539b5 100644 --- a/tools/blocketa/README.md +++ b/tools/blocketa/README.md @@ -15,4 +15,4 @@ arrivalTime: 2024-08-28 17:24:23.483542677 +0000 UTC ``` > [!NOTE] -> The block time is currently hard-coded. If you're running this for a network with a different block time, you'll need to update the `blockTime` constant in the main.go file. You can use https://www.mintscan.io/celestia/block/ or the blocktime tool. +> The block time is currently hard-coded. If you're running this for a network with a different block time, you'll need to update the `blockTime` constant in the main.go file. You can use [https://www.mintscan.io/celestia/block](https://www.mintscan.io/celestia/block/) or the blocktime tool. diff --git a/tools/chainbuilder/README.md b/tools/chainbuilder/README.md new file mode 100644 index 0000000000..50ceb3c06d --- /dev/null +++ b/tools/chainbuilder/README.md @@ -0,0 +1,27 @@ +# Chainbuilder + +`chainbuilder` is a tool for building a Celestia chain for testing and development purposes. + +## Usage + +Use `go` to run the binary as follows: + +```shell +go run ./tools/chainbuilder +``` + +This will create a directory with the name `testnode-{chainID}`. 
All files will be populated and blocks generated based on specified input. You can run a validator on the file system afterwards by calling: + +```shell +celestia-appd start --home /path/to/testnode-{chainID} +``` + +The following are the set of options when generating a chain: + +- `num-blocks` the number of blocks to be generated (default: 100) +- `block-size` the size of the blocks to be generated (default <2MB). This will be a single PFB transaction +- `square-size` the size of the max square (default: 128) +- `existing-dir` point this to a directory if you want to extend an existing chain rather than create a new one +- `namespace` allows you to pick a custom v0 namespace. By default "test" will be chosen. + +This tool takes roughly 60-70ms per 2MB block. diff --git a/tools/chainbuilder/benchmark_test.go b/tools/chainbuilder/benchmark_test.go new file mode 100644 index 0000000000..b72d9297e5 --- /dev/null +++ b/tools/chainbuilder/benchmark_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "context" + "testing" + "time" + + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" +) + +func BenchmarkRun(b *testing.B) { + cfg := BuilderConfig{ + NumBlocks: 100, + BlockSize: appconsts.DefaultMaxBytes, + BlockInterval: time.Second, + } + + dir := b.TempDir() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := Run(context.Background(), cfg, dir); err != nil { + b.Fatal(err) + } + } +} diff --git a/tools/chainbuilder/integration_test.go b/tools/chainbuilder/integration_test.go new file mode 100644 index 0000000000..eb671cd1f0 --- /dev/null +++ b/tools/chainbuilder/integration_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "context" + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/rpc/client/local" + tmdbm "github.com/tendermint/tm-db" + + "github.com/stretchr/testify/require" +) + +func TestRun(t *testing.T) { + if testing.Short() { + t.Skip("skipping chainbuilder tool test") + } + + numBlocks := 10 + + cfg := BuilderConfig{ + NumBlocks: numBlocks, + BlockSize: appconsts.DefaultMaxBytes, + BlockInterval: time.Second, + ChainID: tmrand.Str(6), + Namespace: defaultNamespace, + } + + dir := t.TempDir() + + // First run + err := Run(context.Background(), cfg, dir) + require.NoError(t, err) + + // Second run with existing directory + cfg.ExistingDir = filepath.Join(dir, fmt.Sprintf("testnode-%s", cfg.ChainID)) + err = Run(context.Background(), cfg, dir) + require.NoError(t, err) + + tmCfg := testnode.DefaultTendermintConfig() + tmCfg.SetRoot(cfg.ExistingDir) + + appDB, err := tmdbm.NewDB("application", tmdbm.GoLevelDBBackend, tmCfg.DBDir()) + require.NoError(t, err) + + encCfg := encoding.MakeConfig(app.ModuleBasics) + + app := app.New( + log.NewNopLogger(), + appDB, + nil, + 0, + encCfg, + 0, + util.EmptyAppOptions{}, + baseapp.SetMinGasPrices(fmt.Sprintf("%f%s", appconsts.DefaultMinGasPrice, appconsts.BondDenom)), + ) + + nodeKey, err := 
p2p.LoadNodeKey(tmCfg.NodeKeyFile()) + require.NoError(t, err) + + cometNode, err := node.NewNode( + tmCfg, + privval.LoadOrGenFilePV(tmCfg.PrivValidatorKeyFile(), tmCfg.PrivValidatorStateFile()), + nodeKey, + proxy.NewLocalClientCreator(app), + node.DefaultGenesisDocProviderFunc(tmCfg), + node.DefaultDBProvider, + node.DefaultMetricsProvider(tmCfg.Instrumentation), + log.NewNopLogger(), + ) + require.NoError(t, err) + + require.NoError(t, cometNode.Start()) + defer func() { _ = cometNode.Stop() }() + + client := local.New(cometNode) + status, err := client.Status(context.Background()) + require.NoError(t, err) + require.NotNil(t, status) + // assert that the new node eventually makes progress in the chain + require.Eventually(t, func() bool { + status, err := client.Status(context.Background()) + require.NoError(t, err) + return status.SyncInfo.LatestBlockHeight >= int64(numBlocks*2) + }, time.Second*10, time.Millisecond*100) + require.NoError(t, cometNode.Stop()) + cometNode.Wait() +} diff --git a/tools/chainbuilder/main.go b/tools/chainbuilder/main.go new file mode 100644 index 0000000000..fe385084b3 --- /dev/null +++ b/tools/chainbuilder/main.go @@ -0,0 +1,547 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/celestiaorg/go-square/v2" + "github.com/celestiaorg/go-square/v2/share" + dbm "github.com/cometbft/cometbft-db" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/spf13/cobra" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/privval" + smproto "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" + tmdbm "github.com/tendermint/tm-db" + + "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v3/pkg/da" + "github.com/celestiaorg/celestia-app/v3/pkg/user" + "github.com/celestiaorg/celestia-app/v3/test/util" + "github.com/celestiaorg/celestia-app/v3/test/util/genesis" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" + blobtypes "github.com/celestiaorg/celestia-app/v3/x/blob/types" +) + +var defaultNamespace share.Namespace + +const ( + defaultNamespaceStr = "test" + maxSquareSize = 512 +) + +func init() { + defaultNamespace = share.MustNewV0Namespace([]byte(defaultNamespaceStr)) +} + +func main() { + rootCmd := &cobra.Command{ + Use: "chainbuilder", + Short: "Build a Celestia chain", + RunE: func(cmd *cobra.Command, _ []string) error { + numBlocks, _ := cmd.Flags().GetInt("num-blocks") + blockSize, _ := cmd.Flags().GetInt("block-size") + blockInterval, _ := cmd.Flags().GetDuration("block-interval") + existingDir, _ := cmd.Flags().GetString("existing-dir") + namespaceStr, _ := cmd.Flags().GetString("namespace") + upToTime, _ := cmd.Flags().GetBool("up-to-now") + appVersion, _ := cmd.Flags().GetUint64("app-version") + chainID, _ := cmd.Flags().GetString("chain-id") + var namespace share.Namespace + if namespaceStr == "" { + namespace = defaultNamespace + } else { + var err error + namespace, err = 
share.NewV0Namespace([]byte(namespaceStr)) + if err != nil { + return fmt.Errorf("invalid namespace: %w", err) + } + } + + cfg := BuilderConfig{ + NumBlocks: numBlocks, + BlockSize: blockSize, + BlockInterval: blockInterval, + ExistingDir: existingDir, + Namespace: namespace, + ChainID: tmrand.Str(6), + UpToTime: upToTime, + AppVersion: appVersion, + } + + if chainID != "" { + cfg.ChainID = chainID + } + + dir, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current working directory: %w", err) + } + + return Run(cmd.Context(), cfg, dir) + }, + } + + rootCmd.Flags().Int("num-blocks", 100, "Number of blocks to generate") + rootCmd.Flags().Int("block-size", appconsts.DefaultMaxBytes, "Size of each block in bytes") + rootCmd.Flags().Duration("block-interval", time.Second, "Interval between blocks") + rootCmd.Flags().String("existing-dir", "", "Existing directory to load chain from") + rootCmd.Flags().String("namespace", "", "Custom namespace for the chain") + rootCmd.Flags().Bool("up-to-now", false, "Tool will terminate if the block time reaches the current time") + rootCmd.Flags().Uint64("app-version", appconsts.LatestVersion, "App version to use for the chain") + rootCmd.Flags().String("chain-id", "", "Chain ID to use for the chain. Defaults to a random 6 character string") + rootCmd.SilenceUsage = true + rootCmd.SilenceErrors = true + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +type BuilderConfig struct { + NumBlocks int + BlockSize int + BlockInterval time.Duration + ExistingDir string + Namespace share.Namespace + ChainID string + AppVersion uint64 + UpToTime bool +} + +func Run(ctx context.Context, cfg BuilderConfig, dir string) error { + startTime := time.Now().Add(-1 * cfg.BlockInterval * time.Duration(cfg.NumBlocks)).UTC() + currentTime := startTime + + encCfg := encoding.MakeConfig(app.ModuleBasics) + tmCfg := app.DefaultConsensusConfig() + var ( + gen *genesis.Genesis + kr keyring.Keyring + err error + ) + if cfg.ExistingDir == "" { + dir = filepath.Join(dir, fmt.Sprintf("testnode-%s", cfg.ChainID)) + kr, err = keyring.New(app.Name, keyring.BackendTest, dir, nil, encCfg.Codec) + if err != nil { + return fmt.Errorf("failed to create keyring: %w", err) + } + + validator := genesis.NewDefaultValidator(testnode.DefaultValidatorAccountName) + appCfg := app.DefaultAppConfig() + appCfg.Pruning = "everything" // we just want the last two states + appCfg.StateSync.SnapshotInterval = 0 + cp := app.DefaultConsensusParams() + + cp.Version.AppVersion = cfg.AppVersion // set the app version + gen = genesis.NewDefaultGenesis(). + WithConsensusParams(cp). + WithKeyring(kr). + WithChainID(cfg.ChainID). + WithGenesisTime(startTime). 
+ WithValidators(validator) + + if err := genesis.InitFiles(dir, tmCfg, appCfg, gen, 0); err != nil { + return fmt.Errorf("failed to initialize genesis files: %w", err) + } + fmt.Println("Creating chain from scratch with Chain ID:", gen.ChainID) + } else { + cfgPath := filepath.Join(cfg.ExistingDir, "config/config.toml") + if _, err := os.Stat(cfgPath); os.IsNotExist(err) { + return fmt.Errorf("config file for existing chain not found at %s", cfgPath) + } + fmt.Println("Loading chain from existing directory:", cfg.ExistingDir) + tmCfg.SetRoot(cfg.ExistingDir) + kr, err = keyring.New(app.Name, keyring.BackendTest, cfg.ExistingDir, nil, encCfg.Codec) + if err != nil { + return fmt.Errorf("failed to load keyring: %w", err) + } + } + + validatorKey := privval.LoadFilePV(tmCfg.PrivValidatorKeyFile(), tmCfg.PrivValidatorStateFile()) + validatorAddr := validatorKey.Key.Address + + blockDB, err := dbm.NewDB("blockstore", dbm.GoLevelDBBackend, tmCfg.DBDir()) + if err != nil { + return fmt.Errorf("failed to create block database: %w", err) + } + + blockStore := store.NewBlockStore(blockDB) + + stateDB, err := dbm.NewDB("state", dbm.GoLevelDBBackend, tmCfg.DBDir()) + if err != nil { + return fmt.Errorf("failed to create state database: %w", err) + } + + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: true, + }) + + appDB, err := tmdbm.NewDB("application", tmdbm.GoLevelDBBackend, tmCfg.DBDir()) + if err != nil { + return fmt.Errorf("failed to create application database: %w", err) + } + + simApp := app.New( + log.NewNopLogger(), + appDB, + nil, + 0, + encCfg, + 0, + util.EmptyAppOptions{}, + baseapp.SetMinGasPrices(fmt.Sprintf("%f%s", appconsts.DefaultMinGasPrice, appconsts.BondDenom)), + ) + + infoResp := simApp.Info(abci.RequestInfo{}) + + lastHeight := blockStore.Height() + if infoResp.LastBlockHeight != lastHeight { + return fmt.Errorf("last application height is %d, but the block store height is %d", infoResp.LastBlockHeight, lastHeight) + } + + if lastHeight == 0 { + if gen == nil { + return fmt.Errorf("non empty directory but no blocks found") + } + + genDoc, err := gen.Export() + if err != nil { + return fmt.Errorf("failed to export genesis document: %w", err) + } + + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) + if err != nil { + return fmt.Errorf("failed to load state from database or genesis document: %w", err) + } + + validators := make([]*types.Validator, len(genDoc.Validators)) + for i, val := range genDoc.Validators { + validators[i] = types.NewValidator(val.PubKey, val.Power) + } + validatorSet := types.NewValidatorSet(validators) + nextVals := types.TM2PB.ValidatorUpdates(validatorSet) + csParams := types.TM2PB.ConsensusParams(genDoc.ConsensusParams) + res := simApp.InitChain(abci.RequestInitChain{ + ChainId: genDoc.ChainID, + Time: genDoc.GenesisTime, + ConsensusParams: csParams, + Validators: nextVals, + AppStateBytes: genDoc.AppState, + InitialHeight: genDoc.InitialHeight, + }) + + vals, err := types.PB2TM.ValidatorUpdates(res.Validators) + if err != nil { + return fmt.Errorf("failed to convert validator updates: %w", err) + } + state.Validators = types.NewValidatorSet(vals) + state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1) + state.AppHash = res.AppHash + state.LastResultsHash = merkle.HashFromByteSlices(nil) + if err := stateStore.Save(state); err != nil { + return fmt.Errorf("failed to save initial state: %w", err) + } + currentTime = currentTime.Add(cfg.BlockInterval) + } else { + fmt.Println("Starting 
from height", lastHeight) + } + state, err := stateStore.Load() + if err != nil { + return fmt.Errorf("failed to load state: %w", err) + } + if cfg.ExistingDir != "" { + // if this is extending an existing chain, we want to start + // the time to be where the existing chain left off + currentTime = state.LastBlockTime.Add(cfg.BlockInterval) + } + + if state.ConsensusParams.Version.AppVersion != cfg.AppVersion { + return fmt.Errorf("app version mismatch: state has %d, but cfg has %d", state.ConsensusParams.Version.AppVersion, cfg.AppVersion) + } + + if state.LastBlockHeight != lastHeight { + return fmt.Errorf("last block height mismatch: state has %d, but block store has %d", state.LastBlockHeight, lastHeight) + } + + validatorPower := state.Validators.Validators[0].VotingPower + + signer, err := user.NewSigner( + kr, + encCfg.TxConfig, + state.ChainID, + state.ConsensusParams.Version.AppVersion, + user.NewAccount(testnode.DefaultValidatorAccountName, 0, uint64(lastHeight)+1), + ) + if err != nil { + return fmt.Errorf("failed to create new signer: %w", err) + } + + var ( + errCh = make(chan error, 2) + dataCh = make(chan *tmproto.Data, 100) + persistCh = make(chan persistData, 100) + commit = types.NewCommit(0, 0, types.BlockID{}, nil) + ) + if lastHeight > 0 { + commit = blockStore.LoadSeenCommit(lastHeight) + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + go func() { + errCh <- generateSquareRoutine(ctx, signer, cfg, dataCh) + }() + + go func() { + errCh <- persistDataRoutine(ctx, stateStore, blockStore, persistCh) + }() + + lastBlock := blockStore.LoadBlock(blockStore.Height()) + + for height := lastHeight + 1; height <= int64(cfg.NumBlocks)+lastHeight; height++ { + if cfg.UpToTime && lastBlock != nil && lastBlock.Time.Add(cfg.BlockInterval).After(time.Now().UTC()) { + fmt.Printf("blocks cannot be generated into the future, stopping at height %d\n", lastBlock.Height) + break + } + + select { + case <-ctx.Done(): + return ctx.Err() + case dataPB := <-dataCh: + data, err := types.DataFromProto(dataPB) + if err != nil { + return fmt.Errorf("failed to convert data from protobuf: %w", err) + } + block, blockParts := state.MakeBlock(height, data, commit, nil, validatorAddr) + blockID := types.BlockID{ + Hash: block.Hash(), + PartSetHeader: blockParts.Header(), + } + + precommitVote := &tmproto.Vote{ + Height: height, + Round: 0, + Type: tmproto.PrecommitType, + BlockID: blockID.ToProto(), + ValidatorAddress: validatorAddr, + Timestamp: currentTime, + Signature: nil, + } + + if err := validatorKey.SignVote(state.ChainID, precommitVote); err != nil { + return fmt.Errorf("failed to sign precommit vote (%s): %w", precommitVote.String(), err) + } + + commitSig := types.CommitSig{ + BlockIDFlag: types.BlockIDFlagCommit, + ValidatorAddress: validatorAddr, + Timestamp: currentTime, + Signature: precommitVote.Signature, + } + commit = types.NewCommit(height, 0, blockID, []types.CommitSig{commitSig}) + + var lastCommitInfo abci.LastCommitInfo + if height > 1 { + lastCommitInfo = abci.LastCommitInfo{ + Round: 0, + Votes: []abci.VoteInfo{ + { + Validator: abci.Validator{ + Address: validatorAddr, + Power: validatorPower, + }, + SignedLastBlock: true, + }, + }, + } + } + + beginBlockResp := simApp.BeginBlock(abci.RequestBeginBlock{ + Hash: block.Hash(), + Header: *block.Header.ToProto(), + LastCommitInfo: lastCommitInfo, + }) + + deliverTxResponses := make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) + + for idx, tx := range block.Data.Txs { + blobTx, isBlobTx := 
types.UnmarshalBlobTx(tx) + if isBlobTx { + tx = blobTx.Tx + } + deliverTxResponse := simApp.DeliverTx(abci.RequestDeliverTx{ + Tx: tx, + }) + if deliverTxResponse.Code != abci.CodeTypeOK { + return fmt.Errorf("failed to deliver tx: %s", deliverTxResponse.Log) + } + deliverTxResponses[idx] = &deliverTxResponse + } + + endBlockResp := simApp.EndBlock(abci.RequestEndBlock{ + Height: block.Height, + }) + + commitResp := simApp.Commit() + state.LastBlockHeight = height + state.LastBlockID = blockID + state.LastBlockTime = block.Time + state.LastValidators = state.Validators + state.Validators = state.NextValidators + state.NextValidators = state.NextValidators.CopyIncrementProposerPriority(1) + state.AppHash = commitResp.Data + state.LastResultsHash = sm.ABCIResponsesResultsHash(&smproto.ABCIResponses{ + DeliverTxs: deliverTxResponses, + BeginBlock: &beginBlockResp, + EndBlock: &endBlockResp, + }) + currentTime = currentTime.Add(cfg.BlockInterval) + persistCh <- persistData{ + state: state.Copy(), + block: block, + seenCommit: &types.Commit{ + Height: commit.Height, + Round: commit.Round, + BlockID: commit.BlockID, + Signatures: []types.CommitSig{commitSig}, + }, + } + } + } + + close(dataCh) + close(persistCh) + + var firstErr error + for i := 0; i < cap(errCh); i++ { + err := <-errCh + if err != nil && firstErr == nil { + firstErr = err + } + } + + if err := blockDB.Close(); err != nil { + return fmt.Errorf("failed to close block database: %w", err) + } + if err := stateDB.Close(); err != nil { + return fmt.Errorf("failed to close state database: %w", err) + } + if err := appDB.Close(); err != nil { + return fmt.Errorf("failed to close application database: %w", err) + } + + fmt.Println("Chain built successfully", state.LastBlockHeight) + + return firstErr +} + +func generateSquareRoutine( + ctx context.Context, + signer *user.Signer, + cfg BuilderConfig, + dataCh chan<- *tmproto.Data, +) error { + for i := 0; i < cfg.NumBlocks; i++ { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + account := signer.Accounts()[0] + + blob, err := share.NewV0Blob(cfg.Namespace, crypto.CRandBytes(cfg.BlockSize)) + if err != nil { + return err + } + + blobGas := blobtypes.DefaultEstimateGas([]uint32{uint32(cfg.BlockSize)}) + fee := float64(blobGas) * appconsts.DefaultMinGasPrice * 2 + tx, _, err := signer.CreatePayForBlobs(account.Name(), []*share.Blob{blob}, user.SetGasLimit(blobGas), user.SetFee(uint64(fee))) + if err != nil { + return err + } + if err := signer.IncrementSequence(account.Name()); err != nil { + return err + } + + dataSquare, txs, err := square.Build( + [][]byte{tx}, + maxSquareSize, + appconsts.SubtreeRootThreshold(1), + ) + if err != nil { + return err + } + + eds, err := da.ExtendShares(share.ToBytes(dataSquare)) + if err != nil { + return err + } + + dah, err := da.NewDataAvailabilityHeader(eds) + if err != nil { + return err + } + + select { + case dataCh <- &tmproto.Data{ + Txs: txs, + Hash: dah.Hash(), + SquareSize: uint64(dataSquare.Size()), + }: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} + +type persistData struct { + state sm.State + block *types.Block + seenCommit *types.Commit +} + +func persistDataRoutine( + ctx context.Context, + stateStore sm.Store, + blockStore *store.BlockStore, + dataCh <-chan persistData, +) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case data, ok := <-dataCh: + if !ok { + return nil + } + blockParts := data.block.MakePartSet(types.BlockPartSizeBytes) + blockStore.SaveBlock(data.block, 
blockParts, data.seenCommit) + if blockStore.Height()%100 == 0 { + fmt.Println("Reached height", blockStore.Height()) + } + + if err := stateStore.Save(data.state); err != nil { + return err + } + } + } +} diff --git a/x/blob/README.md b/x/blob/README.md index 3f1e0484ae..efa1f74858 100644 --- a/x/blob/README.md +++ b/x/blob/README.md @@ -49,6 +49,7 @@ message Params { `GasPerBlobByte` is the amount of gas that is consumed per byte of blob data when a `MsgPayForBlobs` is processed. Currently, the default value is 8. This value is set below that of normal transaction gas consumption, which is 10. +`GasPerBlobByte` was a governance-modifiable parameter in v1 and v2. In app v3 and above, it is a versioned parameter, meaning it can only be changed through hard fork upgrades. #### `GovMaxSquareSize` diff --git a/x/blob/ante/ante.go b/x/blob/ante/ante.go index fe58eae87c..3c5249b291 100644 --- a/x/blob/ante/ante.go +++ b/x/blob/ante/ante.go @@ -1,6 +1,8 @@ package ante import ( + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" "github.com/celestiaorg/celestia-app/v3/x/blob/types" "cosmossdk.io/errors" @@ -33,8 +35,12 @@ func (d MinGasPFBDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool // NOTE: here we assume only one PFB per transaction if pfb, ok := m.(*types.MsgPayForBlobs); ok { if gasPerByte == 0 { - // lazily fetch the gas per byte param - gasPerByte = d.k.GasPerBlobByte(ctx) + if ctx.BlockHeader().Version.App <= v2.Version { + // lazily fetch the gas per byte param + gasPerByte = d.k.GasPerBlobByte(ctx) + } else { + gasPerByte = appconsts.GasPerBlobByte(ctx.BlockHeader().Version.App) + } } gasToConsume := pfb.Gas(gasPerByte) if gasToConsume > txGas { diff --git a/x/blob/ante/ante_test.go b/x/blob/ante/ante_test.go index e1f16e2b7a..6be76d77d9 100644 --- a/x/blob/ante/ante_test.go +++ b/x/blob/ante/ante_test.go @@ -1,15 +1,20 @@ package ante_test import ( + "fmt" "testing" "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" ante "github.com/celestiaorg/celestia-app/v3/x/blob/ante" blob "github.com/celestiaorg/celestia-app/v3/x/blob/types" "github.com/celestiaorg/go-square/v2/share" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proto/tendermint/version" ) const ( @@ -22,8 +27,9 @@ func TestPFBAnteHandler(t *testing.T) { testCases := []struct { name string pfb *blob.MsgPayForBlobs - txGas uint64 + txGas func(uint32) uint32 gasConsumed uint64 + versions []uint64 wantErr bool }{ { @@ -32,8 +38,11 @@ func TestPFBAnteHandler(t *testing.T) { // 1 share = 512 bytes = 5120 gas BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(1))}, }, - txGas: share.ShareSize * testGasPerBlobByte, + txGas: func(testGasPerBlobByte uint32) uint32 { + return share.ShareSize * testGasPerBlobByte + }, gasConsumed: 0, + versions: []uint64{v2.Version, appconsts.LatestVersion}, wantErr: false, }, { @@ -41,8 +50,11 @@ func TestPFBAnteHandler(t *testing.T) { pfb: &blob.MsgPayForBlobs{ BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(1)), uint32(share.AvailableBytesFromSparseShares(2))}, }, - txGas: 3 * share.ShareSize * testGasPerBlobByte, + txGas: func(testGasPerBlobByte uint32) uint32 { + return 3 * 
share.ShareSize * testGasPerBlobByte + }, gasConsumed: 0, + versions: []uint64{v2.Version, appconsts.LatestVersion}, wantErr: false, }, { @@ -51,8 +63,11 @@ func TestPFBAnteHandler(t *testing.T) { // 2 share = 1024 bytes = 10240 gas BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(1) + 1)}, }, - txGas: 2*share.ShareSize*testGasPerBlobByte - 1, + txGas: func(testGasPerBlobByte uint32) uint32 { + return 2*share.ShareSize*testGasPerBlobByte - 1 + }, gasConsumed: 0, + versions: []uint64{v2.Version, appconsts.LatestVersion}, wantErr: true, }, { @@ -60,8 +75,11 @@ func TestPFBAnteHandler(t *testing.T) { pfb: &blob.MsgPayForBlobs{ BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(1)), uint32(share.AvailableBytesFromSparseShares(2))}, }, - txGas: 3*share.ShareSize*testGasPerBlobByte - 1, + txGas: func(testGasPerBlobByte uint32) uint32 { + return 3*share.ShareSize*testGasPerBlobByte - 1 + }, gasConsumed: 0, + versions: []uint64{v2.Version, appconsts.LatestVersion}, wantErr: true, }, { @@ -70,8 +88,11 @@ func TestPFBAnteHandler(t *testing.T) { // 1 share = 512 bytes = 5120 gas BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(1))}, }, - txGas: share.ShareSize*testGasPerBlobByte + 10000 - 1, + txGas: func(testGasPerBlobByte uint32) uint32 { + return share.ShareSize*testGasPerBlobByte + 10000 - 1 + }, gasConsumed: 10000, + versions: []uint64{v2.Version, appconsts.LatestVersion}, wantErr: true, }, { @@ -80,26 +101,43 @@ func TestPFBAnteHandler(t *testing.T) { // 1 share = 512 bytes = 5120 gas BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(10))}, }, - txGas: 1000000, + txGas: func(_ uint32) uint32 { + return 1000000 + }, gasConsumed: 10000, + versions: []uint64{v2.Version, appconsts.LatestVersion}, wantErr: false, }, } for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - anteHandler := ante.NewMinGasPFBDecorator(mockBlobKeeper{}) - ctx := sdk.Context{}.WithGasMeter(sdk.NewGasMeter(tc.txGas)).WithIsCheckTx(true) - ctx.GasMeter().ConsumeGas(tc.gasConsumed, "test") - txBuilder := txConfig.NewTxBuilder() - require.NoError(t, txBuilder.SetMsgs(tc.pfb)) - tx := txBuilder.GetTx() - _, err := anteHandler.AnteHandle(ctx, tx, false, func(ctx sdk.Context, _ sdk.Tx, _ bool) (sdk.Context, error) { return ctx, nil }) - if tc.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) + for _, currentVersion := range tc.versions { + t.Run(fmt.Sprintf("%s v%d", tc.name, currentVersion), func(t *testing.T) { + anteHandler := ante.NewMinGasPFBDecorator(mockBlobKeeper{}) + var gasPerBlobByte uint32 + if currentVersion == v2.Version { + gasPerBlobByte = testGasPerBlobByte + } else { + gasPerBlobByte = appconsts.GasPerBlobByte(currentVersion) + } + + ctx := sdk.NewContext(nil, tmproto.Header{ + Version: version.Consensus{ + App: currentVersion, + }, + }, true, nil).WithGasMeter(sdk.NewGasMeter(uint64(tc.txGas(gasPerBlobByte)))).WithIsCheckTx(true) + + ctx.GasMeter().ConsumeGas(tc.gasConsumed, "test") + txBuilder := txConfig.NewTxBuilder() + require.NoError(t, txBuilder.SetMsgs(tc.pfb)) + tx := txBuilder.GetTx() + _, err := anteHandler.AnteHandle(ctx, tx, false, func(ctx sdk.Context, _ sdk.Tx, _ bool) (sdk.Context, error) { return ctx, nil }) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } } } diff --git a/x/blob/ante/blob_share_decorator.go b/x/blob/ante/blob_share_decorator.go index 3a23f45588..90eb7780c6 100644 --- a/x/blob/ante/blob_share_decorator.go +++ 
b/x/blob/ante/blob_share_decorator.go @@ -36,7 +36,7 @@ func (d BlobShareDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool maxBlobShares := d.getMaxBlobShares(ctx) for _, m := range tx.GetMsgs() { if pfb, ok := m.(*blobtypes.MsgPayForBlobs); ok { - if sharesNeeded := getSharesNeeded(pfb.BlobSizes); sharesNeeded > maxBlobShares { + if sharesNeeded := getSharesNeeded(uint32(len(ctx.TxBytes())), pfb.BlobSizes); sharesNeeded > maxBlobShares { return ctx, errors.Wrapf(blobtypes.ErrBlobsTooLarge, "the number of shares occupied by blobs in this MsgPayForBlobs %d exceeds the max number of shares available for blob data %d", sharesNeeded, maxBlobShares) } } @@ -49,10 +49,8 @@ func (d BlobShareDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool func (d BlobShareDecorator) getMaxBlobShares(ctx sdk.Context) int { squareSize := d.getMaxSquareSize(ctx) totalShares := squareSize * squareSize - // The PFB tx share must occupy at least one share so the number of blob shares - // is at most one less than totalShares. - blobShares := totalShares - 1 - return blobShares + // the shares used up by the tx are calculated in `getSharesNeeded` + return totalShares } // getMaxSquareSize returns the maximum square size based on the current values @@ -74,8 +72,9 @@ func (d BlobShareDecorator) getMaxSquareSize(ctx sdk.Context) int { } // getSharesNeeded returns the total number of shares needed to represent all of -// the blobs described by blobSizes. -func getSharesNeeded(blobSizes []uint32) (sum int) { +// the blobs described by blobSizes along with the shares used by the tx +func getSharesNeeded(txSize uint32, blobSizes []uint32) (sum int) { + sum = share.CompactSharesNeeded(txSize) for _, blobSize := range blobSizes { sum += share.SparseSharesNeeded(blobSize) } diff --git a/x/blob/ante/blob_share_decorator_test.go b/x/blob/ante/blob_share_decorator_test.go index 6c788e1935..e65dca6c3f 100644 --- a/x/blob/ante/blob_share_decorator_test.go +++ b/x/blob/ante/blob_share_decorator_test.go @@ -7,12 +7,18 @@ import ( "github.com/celestiaorg/celestia-app/v3/app/encoding" v1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + "github.com/celestiaorg/celestia-app/v3/pkg/user" + "github.com/celestiaorg/celestia-app/v3/test/util/blobfactory" + "github.com/celestiaorg/celestia-app/v3/test/util/testfactory" + "github.com/celestiaorg/celestia-app/v3/test/util/testnode" ante "github.com/celestiaorg/celestia-app/v3/x/blob/ante" blob "github.com/celestiaorg/celestia-app/v3/x/blob/types" "github.com/celestiaorg/go-square/v2/share" + blobtx "github.com/celestiaorg/go-square/v2/tx" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" version "github.com/tendermint/tendermint/proto/tendermint/version" ) @@ -24,141 +30,137 @@ const ( func TestBlobShareDecorator(t *testing.T) { type testCase struct { - name string - pfb *blob.MsgPayForBlobs - appVersion uint64 - wantErr error + name string + blobsPerPFB, blobSize int + appVersion uint64 + wantErr error } + rand := tmrand.NewRand() + testCases := []testCase{ { - name: "want no error if appVersion v1 and 8 MiB blob", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{8 * mebibyte}, - }, - appVersion: v1.Version, + name: "want no error if appVersion v1 and 8 MiB blob", + blobsPerPFB: 1, + blobSize: 8 * mebibyte, + 
appVersion: v1.Version, }, { - name: "PFB with 1 blob that is 1 byte", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{1}, - }, - appVersion: v2.Version, + name: "PFB with 1 blob that is 1 byte", + blobsPerPFB: 1, + blobSize: 1, + appVersion: v2.Version, }, { - name: "PFB with 1 blob that is 1 MiB", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{mebibyte}, - }, - appVersion: v2.Version, + name: "PFB with 1 blob that is 1 MiB", + blobsPerPFB: 1, + blobSize: 1 * mebibyte, + appVersion: v2.Version, }, { - name: "PFB with 1 blob that is 2 MiB", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{2 * mebibyte}, - }, - appVersion: v2.Version, + name: "PFB with 1 blob that is 2 MiB", + blobsPerPFB: 1, + blobSize: 2 * mebibyte, + appVersion: v2.Version, // This test case should return an error because a square size of 64 // has exactly 2 MiB of total capacity so the total blob capacity // will be slightly smaller than 2 MiB. wantErr: blob.ErrBlobsTooLarge, }, { - name: "PFB with 2 blobs that are 1 byte each", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{1, 1}, - }, - appVersion: v2.Version, + name: "PFB with 2 blobs that are 1 byte each", + blobsPerPFB: 2, + blobSize: 1, + appVersion: v2.Version, }, { - name: "PFB with 2 blobs that are 1 MiB each", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{mebibyte, mebibyte}, - }, - appVersion: v2.Version, + name: "PFB with 2 blobs that are 1 MiB each", + blobsPerPFB: 2, + blobSize: 1 * mebibyte, + appVersion: v2.Version, // This test case should return an error for the same reason a // single blob that is 2 MiB returns an error. wantErr: blob.ErrBlobsTooLarge, }, { - name: "PFB with many single byte blobs should fit", - pfb: &blob.MsgPayForBlobs{ - // 4095 blobs each of size 1 byte should occupy 4095 shares. - // When square size is 64, there are 4095 shares available to - // blob shares so we don't expect an error for this test case. - BlobSizes: repeat(4095, 1), - }, - appVersion: v2.Version, + name: "PFB with many single byte blobs should fit", + blobsPerPFB: 3000, + blobSize: 1, + appVersion: v2.Version, }, { - name: "PFB with too many single byte blobs should not fit", - pfb: &blob.MsgPayForBlobs{ - // 4096 blobs each of size 1 byte should occupy 4096 shares. - // When square size is 64, there are 4095 shares available to - // blob shares so we expect an error for this test case. 
- BlobSizes: repeat(4096, 1), - }, - appVersion: v2.Version, - wantErr: blob.ErrBlobsTooLarge, + name: "PFB with too many single byte blobs should not fit", + blobsPerPFB: 4000, + blobSize: 1, + appVersion: v2.Version, + wantErr: blob.ErrBlobsTooLarge, }, { - name: "PFB with 1 blob that is 1 share", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(1))}, - }, - appVersion: v2.Version, + name: "PFB with 1 blob that is 1 share", + blobsPerPFB: 1, + blobSize: 100, + appVersion: v2.Version, }, { - name: "PFB with 1 blob that occupies total square - 1", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares((squareSize * squareSize) - 1))}, - }, - appVersion: v2.Version, + name: "PFB with 1 blob that occupies total square - 1", + blobsPerPFB: 1, + blobSize: share.AvailableBytesFromSparseShares(squareSize*squareSize - 1), + appVersion: v2.Version, }, { - name: "PFB with 1 blob that occupies total square", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{uint32(share.AvailableBytesFromSparseShares(squareSize * squareSize))}, - }, - appVersion: v2.Version, + name: "PFB with 1 blob that occupies total square", + blobsPerPFB: 1, + blobSize: share.AvailableBytesFromSparseShares(squareSize * squareSize), + appVersion: v2.Version, // This test case should return an error because if the blob // occupies the total square, there is no space for the PFB tx // share. wantErr: blob.ErrBlobsTooLarge, }, { - name: "PFB with 2 blobs that are 1 share each", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{ - uint32(share.AvailableBytesFromSparseShares(1)), - uint32(share.AvailableBytesFromSparseShares(1)), - }, - }, - appVersion: v2.Version, + name: "PFB with 2 blobs that are 1 share each", + blobsPerPFB: 2, + blobSize: 100, + appVersion: v2.Version, }, { - name: "PFB with 2 blobs that occupy half the square each", - pfb: &blob.MsgPayForBlobs{ - BlobSizes: []uint32{ - uint32(share.AvailableBytesFromSparseShares(squareSize * squareSize / 2)), - uint32(share.AvailableBytesFromSparseShares(squareSize * squareSize / 2)), - }, - }, - appVersion: v2.Version, - wantErr: blob.ErrBlobsTooLarge, + name: "PFB with 2 blobs that occupy half the square each", + blobsPerPFB: 2, + blobSize: share.AvailableBytesFromSparseShares(squareSize * squareSize / 2), + appVersion: v2.Version, + wantErr: blob.ErrBlobsTooLarge, }, } - txConfig := encoding.MakeConfig(app.ModuleEncodingRegisters...).TxConfig + ecfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - txBuilder := txConfig.NewTxBuilder() - require.NoError(t, txBuilder.SetMsgs(tc.pfb)) - tx := txBuilder.GetTx() + kr, _ := testnode.NewKeyring(testfactory.TestAccName) + signer, err := user.NewSigner( + kr, + ecfg.TxConfig, + testfactory.ChainID, + tc.appVersion, + user.NewAccount(testfactory.TestAccName, 1, 0), + ) + require.NoError(t, err) + + blobTx := blobfactory.RandBlobTxs(signer, rand, 1, tc.blobsPerPFB, tc.blobSize) + + btx, isBlob, err := blobtx.UnmarshalBlobTx([]byte(blobTx[0])) + require.NoError(t, err) + require.True(t, isBlob) + + sdkTx, err := ecfg.TxConfig.TxDecoder()(btx.Tx) + require.NoError(t, err) decorator := ante.NewBlobShareDecorator(mockBlobKeeper{}) - ctx := sdk.Context{}.WithIsCheckTx(true).WithBlockHeader(tmproto.Header{Version: version.Consensus{App: tc.appVersion}}) - _, err := decorator.AnteHandle(ctx, tx, false, mockNext) + ctx := sdk.Context{}. + WithIsCheckTx(true). 
+ WithBlockHeader(tmproto.Header{Version: version.Consensus{App: tc.appVersion}}). + WithTxBytes(btx.Tx) + _, err = decorator.AnteHandle(ctx, sdkTx, false, mockNext) assert.ErrorIs(t, tc.wantErr, err) }) } @@ -167,12 +169,3 @@ func TestBlobShareDecorator(t *testing.T) { func mockNext(ctx sdk.Context, _ sdk.Tx, _ bool) (sdk.Context, error) { return ctx, nil } - -// repeat returns a slice of length n with each element set to val. -func repeat(n int, val uint32) []uint32 { - result := make([]uint32, n) - for i := range result { - result[i] = val - } - return result -} diff --git a/x/blob/client/testutil/integration_test.go b/x/blob/client/testutil/integration_test.go index d29de7f658..b5f6c24db2 100644 --- a/x/blob/client/testutil/integration_test.go +++ b/x/blob/client/testutil/integration_test.go @@ -143,7 +143,6 @@ func (s *IntegrationTestSuite) TestSubmitPayForBlob() { } for _, tc := range testCases { - tc := tc require.NoError(s.ctx.WaitForNextBlock()) s.Run(tc.name, func() { cmd := paycli.CmdPayForBlob() diff --git a/x/blob/keeper/gas_test.go b/x/blob/keeper/gas_test.go index 8c7bd534ff..a1e17a2bdd 100644 --- a/x/blob/keeper/gas_test.go +++ b/x/blob/keeper/gas_test.go @@ -3,6 +3,7 @@ package keeper_test import ( "testing" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/x/blob/types" "github.com/celestiaorg/go-square/v2/share" sdk "github.com/cosmos/cosmos-sdk/types" @@ -23,33 +24,33 @@ func TestPayForBlobGas(t *testing.T) { { name: "1 byte blob", // occupies 1 share msg: types.MsgPayForBlobs{BlobSizes: []uint32{1}}, - wantGasConsumed: uint64(1*share.ShareSize*types.DefaultGasPerBlobByte + paramLookUpCost), // 1 share * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 5156 gas + wantGasConsumed: uint64(1*share.ShareSize*appconsts.GasPerBlobByte(appconsts.LatestVersion) + paramLookUpCost), // 1 share * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 5156 gas }, { name: "100 byte blob", // occupies 1 share msg: types.MsgPayForBlobs{BlobSizes: []uint32{100}}, - wantGasConsumed: uint64(1*share.ShareSize*types.DefaultGasPerBlobByte + paramLookUpCost), + wantGasConsumed: uint64(1*share.ShareSize*appconsts.GasPerBlobByte(appconsts.LatestVersion) + paramLookUpCost), }, { name: "1024 byte blob", // occupies 3 shares because share prefix (e.g. 
namespace, info byte) msg: types.MsgPayForBlobs{BlobSizes: []uint32{1024}}, - wantGasConsumed: uint64(3*share.ShareSize*types.DefaultGasPerBlobByte + paramLookUpCost), // 3 shares * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 13348 gas + wantGasConsumed: uint64(3*share.ShareSize*appconsts.GasPerBlobByte(appconsts.LatestVersion) + paramLookUpCost), // 3 shares * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 13348 gas }, { name: "3 blobs, 1 share each", msg: types.MsgPayForBlobs{BlobSizes: []uint32{1, 1, 1}}, - wantGasConsumed: uint64(3*share.ShareSize*types.DefaultGasPerBlobByte + paramLookUpCost), // 3 shares * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 13348 gas + wantGasConsumed: uint64(3*share.ShareSize*appconsts.GasPerBlobByte(appconsts.LatestVersion) + paramLookUpCost), // 3 shares * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 13348 gas }, { name: "3 blobs, 6 shares total", msg: types.MsgPayForBlobs{BlobSizes: []uint32{1024, 800, 100}}, - wantGasConsumed: uint64(6*share.ShareSize*types.DefaultGasPerBlobByte + paramLookUpCost), // 6 shares * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 25636 gas + wantGasConsumed: uint64(6*share.ShareSize*appconsts.GasPerBlobByte(appconsts.LatestVersion) + paramLookUpCost), // 6 shares * 512 bytes per share * 8 gas per byte + 1060 gas for fetching param = 25636 gas }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - k, stateStore, _ := CreateKeeper(t) + k, stateStore, _ := CreateKeeper(t, appconsts.LatestVersion) ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, nil) _, err := k.PayForBlobs(sdk.WrapSDKContext(ctx), &tc.msg) require.NoError(t, err) @@ -62,7 +63,7 @@ func TestPayForBlobGas(t *testing.T) { func TestChangingGasParam(t *testing.T) { msg := types.MsgPayForBlobs{BlobSizes: []uint32{1024}} - k, stateStore, _ := CreateKeeper(t) + k, stateStore, _ := CreateKeeper(t, appconsts.LatestVersion) tempCtx := sdk.NewContext(stateStore, tmproto.Header{}, false, nil) ctx1 := sdk.NewContext(stateStore, tmproto.Header{}, false, nil) diff --git a/x/blob/keeper/genesis_test.go b/x/blob/keeper/genesis_test.go index d291deebf8..120f9cb21e 100644 --- a/x/blob/keeper/genesis_test.go +++ b/x/blob/keeper/genesis_test.go @@ -3,6 +3,7 @@ package keeper_test import ( "testing" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/x/blob" "github.com/celestiaorg/celestia-app/v3/x/blob/types" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ func TestGenesis(t *testing.T) { Params: types.DefaultParams(), } - k, _, ctx := CreateKeeper(t) + k, _, ctx := CreateKeeper(t, appconsts.LatestVersion) blob.InitGenesis(ctx, *k, genesisState) got := blob.ExportGenesis(ctx, *k) require.NotNil(t, got) diff --git a/x/blob/keeper/grpc_query_params_test.go b/x/blob/keeper/grpc_query_params_test.go index 70f60860ee..e8c367cd35 100644 --- a/x/blob/keeper/grpc_query_params_test.go +++ b/x/blob/keeper/grpc_query_params_test.go @@ -3,13 +3,14 @@ package keeper_test import ( "testing" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/x/blob/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" ) func TestParamsQuery(t *testing.T) { - keeper, _, ctx := CreateKeeper(t) + keeper, _, ctx := CreateKeeper(t, appconsts.LatestVersion) wctx := sdk.WrapSDKContext(ctx) params := types.DefaultParams() keeper.SetParams(ctx, 
params) diff --git a/x/blob/keeper/keeper.go b/x/blob/keeper/keeper.go index 72a3fb7605..a7e0f3cc71 100644 --- a/x/blob/keeper/keeper.go +++ b/x/blob/keeper/keeper.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" "github.com/celestiaorg/celestia-app/v3/x/blob/types" "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" @@ -43,7 +45,14 @@ func (k Keeper) Logger(ctx sdk.Context) log.Logger { func (k Keeper) PayForBlobs(goCtx context.Context, msg *types.MsgPayForBlobs) (*types.MsgPayForBlobsResponse, error) { ctx := sdk.UnwrapSDKContext(goCtx) - gasToConsume := types.GasToConsume(msg.BlobSizes, k.GasPerBlobByte(ctx)) + // GasPerBlobByte is a versioned param from version 3 onwards. + var gasToConsume uint64 + if ctx.BlockHeader().Version.App <= v2.Version { + gasToConsume = types.GasToConsume(msg.BlobSizes, k.GasPerBlobByte(ctx)) + } else { + gasToConsume = types.GasToConsume(msg.BlobSizes, appconsts.GasPerBlobByte(ctx.BlockHeader().Version.App)) + } + ctx.GasMeter().ConsumeGas(gasToConsume, payForBlobGasDescriptor) err := ctx.EventManager().EmitTypedEvent( diff --git a/x/blob/keeper/keeper_test.go b/x/blob/keeper/keeper_test.go index e3da5cb12a..8fee7f81a8 100644 --- a/x/blob/keeper/keeper_test.go +++ b/x/blob/keeper/keeper_test.go @@ -27,7 +27,7 @@ import ( // TestPayForBlobs verifies the attributes on the emitted event. func TestPayForBlobs(t *testing.T) { - k, _, ctx := CreateKeeper(t) + k, _, ctx := CreateKeeper(t, appconsts.LatestVersion) signer := "celestia15drmhzw5kwgenvemy30rqqqgq52axf5wwrruf7" namespace := share.MustNewV0Namespace(bytes.Repeat([]byte{1}, share.NamespaceVersionZeroIDSize)) namespaces := [][]byte{namespace.Bytes()} @@ -72,7 +72,7 @@ func createMsgPayForBlob(t *testing.T, signer string, namespace share.Namespace, return msg } -func CreateKeeper(t *testing.T) (*keeper.Keeper, store.CommitMultiStore, sdk.Context) { +func CreateKeeper(t *testing.T, version uint64) (*keeper.Keeper, store.CommitMultiStore, sdk.Context) { storeKey := sdk.NewKVStoreKey(paramtypes.StoreKey) tStoreKey := storetypes.NewTransientStoreKey(paramtypes.TStoreKey) @@ -87,7 +87,7 @@ func CreateKeeper(t *testing.T) (*keeper.Keeper, store.CommitMultiStore, sdk.Con ctx := sdk.NewContext(stateStore, tmproto.Header{ Version: tmversion.Consensus{ Block: 1, - App: 1, + App: version, }, }, false, nil) diff --git a/x/blob/keeper/params_test.go b/x/blob/keeper/params_test.go index 9431ef3828..53f5dbca20 100644 --- a/x/blob/keeper/params_test.go +++ b/x/blob/keeper/params_test.go @@ -3,12 +3,13 @@ package keeper_test import ( "testing" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/x/blob/types" "github.com/stretchr/testify/require" ) func TestGetParams(t *testing.T) { - k, _, ctx := CreateKeeper(t) + k, _, ctx := CreateKeeper(t, appconsts.LatestVersion) params := types.DefaultParams() k.SetParams(ctx, params) diff --git a/x/blob/types/blob_tx.go b/x/blob/types/blob_tx.go index 1f6273f10c..8089e247ef 100644 --- a/x/blob/types/blob_tx.go +++ b/x/blob/types/blob_tx.go @@ -3,6 +3,7 @@ package types import ( "bytes" + v3 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v3" "github.com/celestiaorg/go-square/v2/inclusion" "github.com/celestiaorg/go-square/v2/share" "github.com/celestiaorg/go-square/v2/tx" @@ -35,7 +36,7 @@ func NewV1Blob(ns share.Namespace, data []byte, signer sdk.AccAddress) (*share.B // ValidateBlobTx performs 
stateless checks on the BlobTx to ensure that the // blobs attached to the transaction are valid. -func ValidateBlobTx(txcfg client.TxEncodingConfig, bTx *tx.BlobTx, subtreeRootThreshold int) error { +func ValidateBlobTx(txcfg client.TxEncodingConfig, bTx *tx.BlobTx, subtreeRootThreshold int, appVersion uint64) error { if bTx == nil { return ErrNoBlobs } @@ -79,6 +80,9 @@ func ValidateBlobTx(txcfg client.TxEncodingConfig, bTx *tx.BlobTx, subtreeRootTh // If share version is 1, assert that the signer in the blob // matches the signer in the msgPFB. if blob.ShareVersion() == share.ShareVersionOne { + if appVersion < v3.Version { + return ErrUnsupportedShareVersion.Wrapf("share version %d is not supported in %d. Supported from v3 onwards", blob.ShareVersion(), appVersion) + } if !bytes.Equal(blob.Signer(), signer) { return ErrInvalidBlobSigner.Wrapf("blob signer %s does not match msgPFB signer %s", sdk.AccAddress(blob.Signer()).String(), msgPFB.Signer) } diff --git a/x/blob/types/blob_tx_test.go b/x/blob/types/blob_tx_test.go index 1a2d8295dc..a1867e2d92 100644 --- a/x/blob/types/blob_tx_test.go +++ b/x/blob/types/blob_tx_test.go @@ -251,7 +251,7 @@ func TestValidateBlobTx(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := types.ValidateBlobTx(encCfg.TxConfig, tt.getTx(), appconsts.DefaultSubtreeRootThreshold) + err := types.ValidateBlobTx(encCfg.TxConfig, tt.getTx(), appconsts.DefaultSubtreeRootThreshold, appconsts.LatestVersion) if tt.expectedErr != nil { assert.ErrorIs(t, err, tt.expectedErr, tt.name) } diff --git a/x/blob/types/payforblob.go b/x/blob/types/payforblob.go index 92eca0a4d7..b49eb05394 100644 --- a/x/blob/types/payforblob.go +++ b/x/blob/types/payforblob.go @@ -10,7 +10,6 @@ import ( "github.com/celestiaorg/go-square/v2/share" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx" - auth "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/tendermint/tendermint/crypto/merkle" "golang.org/x/exp/slices" ) @@ -162,10 +161,9 @@ func EstimateGas(blobSizes []uint32, gasPerByte uint32, txSizeCost uint64) uint6 return GasToConsume(blobSizes, gasPerByte) + (txSizeCost * BytesPerBlobInfo * uint64(len(blobSizes))) + PFBGasFixedCost } -// DefaultEstimateGas runs EstimateGas with the system defaults. The network may change these values -// through governance, thus this function should predominantly be used in testing. +// DefaultEstimateGas runs EstimateGas with the system defaults. func DefaultEstimateGas(blobSizes []uint32) uint64 { - return EstimateGas(blobSizes, appconsts.DefaultGasPerBlobByte, auth.DefaultTxSizeCostPerByte) + return EstimateGas(blobSizes, appconsts.DefaultGasPerBlobByte, appconsts.DefaultTxSizeCostPerByte) } // ValidateBlobNamespace returns an error if the provided namespace is an diff --git a/x/blobstream/integration_test.go b/x/blobstream/integration_test.go index a8f19bf4b6..7bb03a49ee 100644 --- a/x/blobstream/integration_test.go +++ b/x/blobstream/integration_test.go @@ -46,6 +46,8 @@ func (s *BlobstreamIntegrationSuite) SetupSuite() { cctx, _, _ := testnode.NewNetwork(t, cfg) s.ecfg = encoding.MakeConfig(app.ModuleEncodingRegisters...) 
s.cctx = cctx + + require.NoError(t, s.cctx.WaitForBlocks(10)) } func (s *BlobstreamIntegrationSuite) TestBlobstream() { diff --git a/x/blobstream/keeper/keeper_data_commitment.go b/x/blobstream/keeper/keeper_data_commitment.go index 0b0d656992..19bfc6d01c 100644 --- a/x/blobstream/keeper/keeper_data_commitment.go +++ b/x/blobstream/keeper/keeper_data_commitment.go @@ -109,7 +109,7 @@ func (k Keeper) GetLatestDataCommitment(ctx sdk.Context) (types.DataCommitment, return types.DataCommitment{}, err } if !found { - return types.DataCommitment{}, errors.Wrapf(types.ErrAttestationNotFound, fmt.Sprintf("nonce %d", i)) + return types.DataCommitment{}, errors.Wrapf(types.ErrAttestationNotFound, "nonce %d", i) } dcc, ok := att.(*types.DataCommitment) if !ok { @@ -136,7 +136,7 @@ func (k Keeper) HasDataCommitmentInStore(ctx sdk.Context) (bool, error) { return false, err } if !found { - return false, errors.Wrapf(types.ErrAttestationNotFound, fmt.Sprintf("nonce %d", i)) + return false, errors.Wrapf(types.ErrAttestationNotFound, "nonce %d", i) } _, ok := att.(*types.DataCommitment) if !ok { diff --git a/x/blobstream/keeper/keeper_valset_test.go b/x/blobstream/keeper/keeper_valset_test.go index d4b72406bf..6a73da03eb 100644 --- a/x/blobstream/keeper/keeper_valset_test.go +++ b/x/blobstream/keeper/keeper_valset_test.go @@ -57,7 +57,6 @@ func TestCurrentValsetNormalization(t *testing.T) { }, } for msg, spec := range specs { - spec := spec t.Run(msg, func(t *testing.T) { input, ctx := testutil.SetupTestChain(t, spec.srcPowers) r, err := input.BlobstreamKeeper.GetCurrentValset(ctx) diff --git a/x/minfee/grpc_query_test.go b/x/minfee/grpc_query_test.go index a7ab0fdfef..9e797ee828 100644 --- a/x/minfee/grpc_query_test.go +++ b/x/minfee/grpc_query_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/celestiaorg/celestia-app/v3/app" - v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" testutil "github.com/celestiaorg/celestia-app/v3/test/util" "github.com/celestiaorg/celestia-app/v3/x/minfee" sdk "github.com/cosmos/cosmos-sdk/types" @@ -24,5 +24,5 @@ func TestQueryNetworkMinGasPrice(t *testing.T) { require.NoError(t, err) // Check the response - require.Equal(t, v2.NetworkMinGasPrice, resp.NetworkMinGasPrice.MustFloat64()) + require.Equal(t, appconsts.DefaultNetworkMinGasPrice, resp.NetworkMinGasPrice.MustFloat64()) } diff --git a/x/minfee/params.go b/x/minfee/params.go index ef17044d7c..2f1edc3ee8 100644 --- a/x/minfee/params.go +++ b/x/minfee/params.go @@ -3,7 +3,7 @@ package minfee import ( "fmt" - v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" sdk "github.com/cosmos/cosmos-sdk/types" paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" ) @@ -18,7 +18,7 @@ var ( ) func init() { - DefaultNetworkMinGasPriceDec, err := sdk.NewDecFromStr(fmt.Sprintf("%f", v2.NetworkMinGasPrice)) + DefaultNetworkMinGasPriceDec, err := sdk.NewDecFromStr(fmt.Sprintf("%f", appconsts.DefaultNetworkMinGasPrice)) if err != nil { panic(err) } diff --git a/x/mint/client/testutil/suite_test.go b/x/mint/client/testutil/suite_test.go index 9be2f45fc8..4910fcc99d 100644 --- a/x/mint/client/testutil/suite_test.go +++ b/x/mint/client/testutil/suite_test.go @@ -68,8 +68,6 @@ func (s *IntegrationTestSuite) TestGetCmdQueryInflationRate() { } for _, tc := range testCases { - tc := tc - s.Run(tc.name, func() { cmd := cli.GetCmdQueryInflationRate() @@ -103,8 +101,6 @@ func (s *IntegrationTestSuite) 
TestGetCmdQueryAnnualProvisions() { expectedAnnualProvision := mint.InitialInflationRateAsDec().MulInt(sdk.NewInt(testnode.DefaultInitialBalance)) for _, tc := range testCases { - tc := tc - s.Run(tc.name, func() { cmd := cli.GetCmdQueryAnnualProvisions() out, err := clitestutil.ExecTestCLICmd(s.cctx.Context, cmd, tc.args) @@ -134,8 +130,6 @@ func (s *IntegrationTestSuite) TestGetCmdQueryGenesisTime() { } for _, tc := range testCases { - tc := tc - s.Run(tc.name, func() { cmd := cli.GetCmdQueryGenesisTime() out, err := clitestutil.ExecTestCLICmd(s.cctx.Context, cmd, tc.args) diff --git a/x/mint/simulation/decoder_test.go b/x/mint/simulation/decoder_test.go index 74a634db87..c4122a316a 100644 --- a/x/mint/simulation/decoder_test.go +++ b/x/mint/simulation/decoder_test.go @@ -51,7 +51,6 @@ func TestDecodeStore(t *testing.T) { } for i, tt := range tests { - i, tt := i, tt t.Run(tt.name, func(t *testing.T) { if tt.expectPanic { require.Panics(t, func() { decoder(kvPairs.Pairs[i], kvPairs.Pairs[i]) }, tt.name) diff --git a/x/signal/integration_test.go b/x/signal/integration_test.go index 434b9cf882..c3dd2419dd 100644 --- a/x/signal/integration_test.go +++ b/x/signal/integration_test.go @@ -4,8 +4,9 @@ import ( "testing" "github.com/celestiaorg/celestia-app/v3/app" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" + v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" testutil "github.com/celestiaorg/celestia-app/v3/test/util" - "github.com/celestiaorg/celestia-app/v3/x/signal" "github.com/celestiaorg/celestia-app/v3/x/signal/types" "github.com/stretchr/testify/require" @@ -19,17 +20,19 @@ import ( // simulates an upgrade scenario with a single validator which signals for the version change, checks the quorum // has been reached and then calls TryUpgrade, asserting that the upgrade module returns the new app version func TestUpgradeIntegration(t *testing.T) { - app, _ := testutil.SetupTestAppWithGenesisValSet(app.DefaultConsensusParams()) + cp := app.DefaultConsensusParams() + cp.Version.AppVersion = v2.Version + app, _ := testutil.SetupTestAppWithGenesisValSet(cp) ctx := sdk.NewContext(app.CommitMultiStore(), tmtypes.Header{ Version: tmversion.Consensus{ - App: 1, + App: v2.Version, }, }, false, tmlog.NewNopLogger()) goCtx := sdk.WrapSDKContext(ctx) ctx = sdk.UnwrapSDKContext(goCtx) res, err := app.SignalKeeper.VersionTally(goCtx, &types.QueryVersionTallyRequest{ - Version: 2, + Version: 3, }) require.NoError(t, err) require.EqualValues(t, 0, res.VotingPower) @@ -40,12 +43,12 @@ func TestUpgradeIntegration(t *testing.T) { _, err = app.SignalKeeper.SignalVersion(ctx, &types.MsgSignalVersion{ ValidatorAddress: valAddr.String(), - Version: 2, + Version: 3, }) require.NoError(t, err) res, err = app.SignalKeeper.VersionTally(goCtx, &types.QueryVersionTallyRequest{ - Version: 2, + Version: 3, }) require.NoError(t, err) require.EqualValues(t, 1, res.VotingPower) @@ -65,7 +68,7 @@ func TestUpgradeIntegration(t *testing.T) { // returns an error because an upgrade is pending. 
_, err = app.SignalKeeper.SignalVersion(ctx, &types.MsgSignalVersion{ ValidatorAddress: valAddr.String(), - Version: 3, + Version: 4, }) require.Error(t, err) require.ErrorIs(t, err, types.ErrUpgradePending) @@ -74,9 +77,9 @@ func TestUpgradeIntegration(t *testing.T) { require.False(t, shouldUpgrade) require.EqualValues(t, 0, version) - ctx = ctx.WithBlockHeight(ctx.BlockHeight() + signal.DefaultUpgradeHeightDelay) + ctx = ctx.WithBlockHeight(ctx.BlockHeight() + appconsts.UpgradeHeightDelay(version)) shouldUpgrade, version = app.SignalKeeper.ShouldUpgrade(ctx) require.True(t, shouldUpgrade) - require.EqualValues(t, 2, version) + require.EqualValues(t, 3, version) } diff --git a/x/signal/keeper.go b/x/signal/keeper.go index ed280c67f4..3ee13a9708 100644 --- a/x/signal/keeper.go +++ b/x/signal/keeper.go @@ -5,6 +5,7 @@ import ( "encoding/binary" sdkmath "cosmossdk.io/math" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" "github.com/celestiaorg/celestia-app/v3/x/signal/types" "github.com/cosmos/cosmos-sdk/codec" storetypes "github.com/cosmos/cosmos-sdk/store/types" @@ -12,11 +13,6 @@ import ( stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) -// DefaultUpgradeHeightDelay is the number of blocks after a quorum has been -// reached that the chain should upgrade to the new version. Assuming a block -// interval of 12 seconds, this is 7 days. -const DefaultUpgradeHeightDelay = int64(7 * 24 * 60 * 60 / 12) // 7 days * 24 hours * 60 minutes * 60 seconds / 12 seconds per block = 50,400 blocks. - // Keeper implements the MsgServer and QueryServer interfaces var ( _ types.MsgServer = &Keeper{} @@ -108,7 +104,7 @@ func (k *Keeper) TryUpgrade(ctx context.Context, _ *types.MsgTryUpgrade) (*types } upgrade := types.Upgrade{ AppVersion: version, - UpgradeHeight: sdkCtx.BlockHeader().Height + DefaultUpgradeHeightDelay, + UpgradeHeight: sdkCtx.BlockHeader().Height + appconsts.UpgradeHeightDelay(version), } k.setUpgrade(sdkCtx, upgrade) } diff --git a/x/signal/keeper_test.go b/x/signal/keeper_test.go index a476562696..f79d1c3884 100644 --- a/x/signal/keeper_test.go +++ b/x/signal/keeper_test.go @@ -12,6 +12,7 @@ import ( "github.com/celestiaorg/celestia-app/v3/app" "github.com/celestiaorg/celestia-app/v3/app/encoding" + "github.com/celestiaorg/celestia-app/v3/pkg/appconsts" v1 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v1" v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2" "github.com/celestiaorg/celestia-app/v3/x/signal" @@ -182,7 +183,7 @@ func TestTallyingLogic(t *testing.T) { require.False(t, shouldUpgrade) // should be false because upgrade height hasn't been reached. require.Equal(t, uint64(0), version) - ctx = ctx.WithBlockHeight(ctx.BlockHeight() + signal.DefaultUpgradeHeightDelay) + ctx = ctx.WithBlockHeight(ctx.BlockHeight() + appconsts.UpgradeHeightDelay(version)) shouldUpgrade, version = upgradeKeeper.ShouldUpgrade(ctx) require.True(t, shouldUpgrade) // should be true because upgrade height has been reached. @@ -425,7 +426,7 @@ func TestGetUpgrade(t *testing.T) { got, err := upgradeKeeper.GetUpgrade(ctx, &types.QueryGetUpgradeRequest{}) require.NoError(t, err) assert.Equal(t, v2.Version, got.Upgrade.AppVersion) - assert.Equal(t, signal.DefaultUpgradeHeightDelay, got.Upgrade.UpgradeHeight) + assert.Equal(t, appconsts.UpgradeHeightDelay(v2.Version), got.Upgrade.UpgradeHeight) }) }
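For readers following the gas-accounting changes above: the patch gates the GasPerBlobByte lookup on the block's app version in both x/blob/ante (MinGasPFBDecorator) and the blob keeper's PayForBlobs, reading the governance-modifiable module parameter up to app v2 and the versioned appconsts value from v3 onwards. Below is a minimal sketch of that pattern consolidated into a single helper; the package, interface, and function names here are illustrative only and are not part of this diff.

// Package gasparam sketches the version-gated parameter lookup introduced in
// this patch. Names below (gasparam, paramGetter, gasPerBlobByte) are
// hypothetical; only the called APIs appear in the diff itself.
package gasparam

import (
	"github.com/celestiaorg/celestia-app/v3/pkg/appconsts"
	v2 "github.com/celestiaorg/celestia-app/v3/pkg/appconsts/v2"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// paramGetter abstracts the blob keeper's GasPerBlobByte param accessor.
type paramGetter interface {
	GasPerBlobByte(ctx sdk.Context) uint32
}

// gasPerBlobByte returns the effective gas-per-blob-byte value for the
// current block's app version, mirroring the branching added in
// x/blob/ante/ante.go and x/blob/keeper/keeper.go.
func gasPerBlobByte(ctx sdk.Context, k paramGetter) uint32 {
	if ctx.BlockHeader().Version.App <= v2.Version {
		// App v1/v2: governance-modifiable module parameter.
		return k.GasPerBlobByte(ctx)
	}
	// App v3+: versioned constant, changeable only via hard-fork upgrades.
	return appconsts.GasPerBlobByte(ctx.BlockHeader().Version.App)
}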