From 14ba9c610c4fc6dc781ee63847d3b86a3736d342 Mon Sep 17 00:00:00 2001 From: joshvanl Date: Thu, 30 Nov 2023 15:06:41 +0000 Subject: [PATCH 1/4] Updates Go to 1.21, golangci-lint to 1.55.2 Note that, due to the change in import and init func ordering in Go 1.21, the grpc proxy proto codec is no longer registered after the grpc internal proto codec definition, meaning the dapr custom codec will not be registered, which causes errors. To fix this, the custom proto codec needs to be either re-registered at runtime or renamed. This is done in the gRPC API object and in handler_test.go TestMain. Signed-off-by: joshvanl --- .build-tools/cmd/check-lint-version_test.go | 2 +- .build-tools/go.mod | 2 +- .github/workflows/dapr.yml | 2 +- .github/workflows/test-tooling.yml | 4 +- Makefile | 2 +- docker/Dockerfile-debug | 2 +- docker/Dockerfile-dev | 6 +- docker/README.md | 2 +- docker/custom-scripts/install-dapr-tools.sh | 2 +- docs/development/developing-dapr.md | 2 +- .../development/setup-dapr-development-env.md | 4 +- go.mod | 14 +-- go.sum | 90 ++++++++++++++++--- pkg/grpc/api.go | 3 + pkg/grpc/proxy/codec/codec.go | 4 - pkg/grpc/proxy/handler_test.go | 1 + tests/apps/actorload/Dockerfile | 2 +- tests/apps/actorload/go.mod | 2 +- .../perf/actor-activation-locker/Dockerfile | 2 +- .../apps/perf/actor-activation-locker/go.mod | 2 +- tests/apps/perf/actorfeatures/Dockerfile | 4 +- tests/apps/perf/actorfeatures/go.mod | 2 +- tests/apps/perf/k6-custom/Dockerfile | 2 +- .../perf/service_invocation_grpc/Dockerfile | 2 +- .../apps/perf/service_invocation_grpc/go.mod | 2 +- .../perf/service_invocation_http/Dockerfile | 2 +- .../apps/perf/service_invocation_http/go.mod | 2 +- tests/apps/perf/tester/Dockerfile | 4 +- tests/apps/perf/tester/go.mod | 2 +- tests/apps/pluggable_kafka-bindings/go.mod | 2 +- tests/apps/pluggable_redis-pubsub/go.mod | 2 +- tests/apps/pluggable_redis-statestore/go.mod | 2 +- tests/apps/resiliencyapp/go.mod | 2 +- tests/apps/resiliencyapp_grpc/go.mod | 2 +- .../go.mod | 2 +- .../go.mod | 2 +- .../serviceinvocation/http/httpendpoints.go | 2 +- 37 files changed, 128 insertions(+), 60 deletions(-) diff --git a/.build-tools/cmd/check-lint-version_test.go b/.build-tools/cmd/check-lint-version_test.go index 7c8dec4f008..6db22808068 100644 --- a/.build-tools/cmd/check-lint-version_test.go +++ b/.build-tools/cmd/check-lint-version_test.go @@ -35,7 +35,7 @@ func TestParseWorkflow(t *testing.T) { func TestGetCurrentVersion(t *testing.T) { t.Run("get current version from system", func(t *testing.T) { currentVersion, err := getCurrentVersion() - assert.Equal(t, "v1.51.2", currentVersion) + assert.Equal(t, "v1.55.2", currentVersion) assert.NoError(t, err) }) diff --git a/.build-tools/go.mod b/.build-tools/go.mod index 80b012df5f4..fbe1037578f 100644 --- a/.build-tools/go.mod +++ b/.build-tools/go.mod @@ -1,6 +1,6 @@ module build-tools -go 1.20 +go 1.21 require ( github.com/google/go-containerregistry v0.11.1-0.20220802162123-c1f9836a4fa9 diff --git a/.github/workflows/dapr.yml b/.github/workflows/dapr.yml index 2512d12562f..5fc9b6f25ae 100644 --- a/.github/workflows/dapr.yml +++ b/.github/workflows/dapr.yml @@ -39,7 +39,7 @@ jobs: target_os: ["linux"] target_arch: ["amd64"] env: - GOLANGCILINT_VER: "v1.51.2" + GOLANGCILINT_VER: "v1.55.2" PROTOC_VERSION: "21.12" GOOS: "${{ matrix.target_os }}" GOARCH: "${{ matrix.target_arch }}" diff --git a/.github/workflows/test-tooling.yml b/.github/workflows/test-tooling.yml index c071efa555f..4a6f45d23fe 100644 --- 
a/.github/workflows/test-tooling.yml +++ b/.github/workflows/test-tooling.yml @@ -25,7 +25,7 @@ jobs: - "macos-latest" runs-on: ${{ matrix.os }} env: - GOLANGCILINT_VER: "v1.51.2" # Make sure to bump /.build-tools/check-lint-version/main_test.go + GOLANGCILINT_VER: "v1.55.2" # Make sure to bump /.build-tools/check-lint-version/main_test.go steps: - name: Checkout @@ -46,4 +46,4 @@ jobs: - name: Test working-directory: ./.build-tools - run: go test ./... \ No newline at end of file + run: go test ./... diff --git a/Makefile b/Makefile index d067ff6d78b..b2c2d091ab3 100644 --- a/Makefile +++ b/Makefile @@ -396,7 +396,7 @@ MODFILES := $(shell find . -name go.mod) define modtidy-target .PHONY: modtidy-$(1) modtidy-$(1): - cd $(shell dirname $(1)); CGO_ENABLED=$(CGO) go mod tidy -compat=1.20; cd - + cd $(shell dirname $(1)); CGO_ENABLED=$(CGO) go mod tidy -compat=1.21; cd - endef # Generate modtidy target action for each go.mod file diff --git a/docker/Dockerfile-debug b/docker/Dockerfile-debug index 30f9bb8e707..51c65225dab 100644 --- a/docker/Dockerfile-debug +++ b/docker/Dockerfile-debug @@ -1,6 +1,6 @@ # current directory must be ./dist -FROM golang:1.20 +FROM golang:1.21 ARG PKG_FILES RUN go install github.com/go-delve/delve/cmd/dlv@latest diff --git a/docker/Dockerfile-dev b/docker/Dockerfile-dev index 664ad75440b..04ee0efd8ca 100644 --- a/docker/Dockerfile-dev +++ b/docker/Dockerfile-dev @@ -1,7 +1,7 @@ # Based on https://github.com/microsoft/vscode-dev-containers/tree/v0.224.3/containers/go/.devcontainer/base.Dockerfile -# [Choice] Go version: 1, 1.20, etc -ARG GOVERSION=1.20 +# [Choice] Go version: 1, 1.21, etc +ARG GOVERSION=1.21 FROM golang:${GOVERSION}-bullseye # [Option] Install zsh @@ -15,7 +15,7 @@ ARG DAPR_CLI_VERSION="latest" ARG PROTOC_VERSION="21.12" ARG PROTOC_GEN_GO_VERSION="1.28.1" ARG PROTOC_GEN_GO_GRPC_VERSION="1.2.0" -ARG GOLANGCI_LINT_VERSION="1.51.2" +ARG GOLANGCI_LINT_VERSION="1.55.2" # This Dockerfile adds a non-root 'dapr' user with sudo access. However, for Linux, # this user's GID/UID must match your local user UID/GID to avoid permission issues diff --git a/docker/README.md b/docker/README.md index 9293107c168..4e9ab51b913 100644 --- a/docker/README.md +++ b/docker/README.md @@ -12,7 +12,7 @@ This includes dockerfiles to build Dapr release and debug images and development The Dev Container can be rebuilt with custom options. Relevant args (and their default values) include: -* `GOVERSION` (default: `1.20`) +* `GOVERSION` (default: `1.21`) * `INSTALL_ZSH` (default: `true`) * `KUBECTL_VERSION` (default: `latest`) * `HELM_VERSION` (default: `latest`) diff --git a/docker/custom-scripts/install-dapr-tools.sh b/docker/custom-scripts/install-dapr-tools.sh index 78fdde6a1af..8d202b134e2 100644 --- a/docker/custom-scripts/install-dapr-tools.sh +++ b/docker/custom-scripts/install-dapr-tools.sh @@ -21,7 +21,7 @@ DAPR_CLI_VERSION=${4:-""} PROTOC_VERSION=${5:-"21.12"} PROTOC_GEN_GO_VERSION=${6:-"1.28.1"} PROTOC_GEN_GO_GRPC_VERSION=${7:-"1.2.0"} -GOLANGCI_LINT_VERSION=${8:-"1.51.2"} +GOLANGCI_LINT_VERSION=${8:-"1.55.2"} set -e diff --git a/docs/development/developing-dapr.md b/docs/development/developing-dapr.md index 94a75330e65..6af3aaba121 100644 --- a/docs/development/developing-dapr.md +++ b/docs/development/developing-dapr.md @@ -63,7 +63,7 @@ This command will: - format, test and lint all the code - check if you forgot to `git commit` something -Note: To run linter locally, please use golangci-lint version v1.51.2, otherwise you might encounter errors. 
You can download version v1.51.2 [here](https://github.com/golangci/golangci-lint/releases/tag/v1.51.2). +Note: To run linter locally, please use golangci-lint version v1.55.2, otherwise you might encounter errors. You can download version v1.55.2 [here](https://github.com/golangci/golangci-lint/releases/tag/v1.55.2). ## Debug Dapr diff --git a/docs/development/setup-dapr-development-env.md b/docs/development/setup-dapr-development-env.md index 1592b7fa526..64e2fa2ba3e 100644 --- a/docs/development/setup-dapr-development-env.md +++ b/docs/development/setup-dapr-development-env.md @@ -23,11 +23,11 @@ This document helps you get started developing Dapr. If you find any problems wh ## Go (Golang) -1. Download and install [Go 1.20 or later](https://golang.org/doc/install#tarball). +1. Download and install [Go 1.21 or later](https://golang.org/doc/install#tarball). 2. Install [Delve](https://github.com/go-delve/delve/tree/master/Documentation/installation) for Go debugging, if desired. -3. Install [golangci-lint](https://golangci-lint.run/usage/install) version 1.51.2. +3. Install [golangci-lint](https://golangci-lint.run/usage/install) version 1.55.2. ## Setup a Kubernetes development environment diff --git a/go.mod b/go.mod index 2b42e6dbb91..8aee45226cf 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/dapr/dapr -go 1.20 +go 1.21 + +toolchain go1.21.4 require ( contrib.go.opencensus.io/exporter/prometheus v0.4.2 @@ -9,7 +11,7 @@ require ( github.com/argoproj/argo-rollouts v1.4.1 github.com/cenkalti/backoff/v4 v4.2.1 github.com/cloudevents/sdk-go/v2 v2.14.0 - github.com/dapr/components-contrib v1.12.1-0.20231102232056-f4e73b0e6511 + github.com/dapr/components-contrib v1.12.1-0.20231129042434-36a055ebd8d7 github.com/dapr/kit v0.12.2-0.20231031211530-0e1fd37fc4b3 github.com/evanphx/json-patch/v5 v5.7.0 github.com/go-chi/chi/v5 v5.0.10 @@ -94,13 +96,13 @@ require ( github.com/AthenZ/athenz v1.10.39 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai v0.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.6 // indirect - github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.0.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.0.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventgrid/armeventgrid/v2 v2.1.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.1.1 // indirect diff --git a/go.sum b/go.sum index 9d23487fbd2..7d6438456e7 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,7 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc= cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= cloud.google.com/go/kms v1.15.3 
h1:RYsbxTRmk91ydKCzekI2YjryO4c5Y2M80Zwcs9/D/cI= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -66,20 +67,20 @@ github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai v0.3.0 h1:x7fb22Q43h2DRFCvp9rAua8PoV3gwtl1bK5+pihnihA= github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai v0.3.0/go.mod h1:zPJgGMjMheJJrYgrQ4W8NrNCWtWXAkjI3KWYFnTtwdA= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.6.0 h1:LToWxpzz8huiO1uybg8dlehzjNqOyx+Rol2B0+PSV3s= github.com/Azure/azure-sdk-for-go/sdk/data/azappconfig v0.6.0/go.mod h1:HnoLsZ2bAldgSQ5ngZj4iVA6zcA3LYQXKxs/dWftaHA= github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.6 h1:oBqQLSI1pZwGOdXJAoJJSzmff9tlfD4KroVfjQQmd0g= github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v0.3.6/go.mod h1:Beh5cHIXJ0oWEDWk9lNFtuklCojLLQ5hl+LqSNTTs0I= -github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.2 h1:iXFUCl7NK2DPVKfixcYDPGj3uLV7yf5eolBsoWD8Sc4= -github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.2/go.mod h1:E1WPwLx0wZyus7NBHjhrHE1QgWwKJPE81fnUbT+FxqI= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0 h1:TuEMD+E+1aTjjLICGQOW6vLe8UWES7kopac9mUXL56Y= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.0.1 h1:7G4EhZbWFwfgkNfJkNoZmFL8FfWT6P96YVwG71uhNxY= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.0.1/go.mod h1:fswVBSaYFoW4XXp3oXG0vuDVdToLr3kRzgp5oePMq5g= +github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.1.0 h1:ONYihl/vbwtVAmEmqoVDCGyhad2CIMN2kg3BO8Y5cFk= +github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.1.0/go.mod h1:PMB5kQ1apg/irrvpPryVdchapVIYP+VV9iHJQ2CHwG8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.0.2 h1:ujuMdFIUqhfohvpjjt7YmWn6Wk5Vlw9cwtGC0/BEwLU= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.0.2/go.mod h1:P39PnDHXbDhUV+BVw/8Nb7wQnM76jKUA7qx5T7eS+BU= github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.5.0 h1:HKHkea1fdm18LT8VAxTVZgJpPsLgv+0NZhmtus1UqJQ= github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.5.0/go.mod h1:4BbKA+mRmmTP8VaLfDPNF5nOdhRm5upG3AXVWfv1dxc= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventgrid/armeventgrid/v2 v2.1.1 h1:q8d6Cw16DrwJ+o82GMEQ+xt65q7w4m7VcI4C+gK/7Jk= @@ -87,8 +88,11 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventgrid/armeventgrid/v2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.1.1 h1:gZ1ZZvrVUhDNsGNpbo2N87Y0CJB8p3IS5UH9Z4Ui97g= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.1.1/go.mod h1:7fQVOnRA11ScLE8dOCWanXHQa2NMFOM2i0u/1VRICXA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.0.1 h1:8TkzQBrN9PWIwo7ekdd696KpC6IfTltV2/F8qKKBWik= @@ -108,6 +112,7 @@ github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= @@ -117,6 +122,7 @@ github.com/IBM/sarama v1.42.1 h1:wugyWa15TDEHh2kvq2gAy1IHLjEjuYOYgXz/ruC/OSQ= github.com/IBM/sarama v1.42.1/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Netflix/go-env v0.0.0-20220526054621-78278af1949d h1:wvStE9wLpws31NiWUx+38wny1msZ/tm+eL5xmm4Y7So= github.com/Netflix/go-env v0.0.0-20220526054621-78278af1949d/go.mod h1:9XMFaCeRyW7fC9XJOWQ+NdAv8VLG7ys7l3x4ozEGLUQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -146,6 +152,7 @@ github.com/agiledragon/gomonkey v2.0.2+incompatible/go.mod h1:2NGfXu1a80LLr2cmWX github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/ajg/form v1.5.1 
h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -187,7 +194,9 @@ github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5 github.com/alibabacloud-go/tea-xml v1.1.2 h1:oLxa7JUXm2EDFzMg+7oRsYc+kutgCVwm+bZlhhmvW5M= github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.30.5 h1:3r6kTHdKnuP4fkS8k2IrvSfxpxUTcW1SOL0wN7b7Dt0= +github.com/alicebob/miniredis/v2 v2.30.5/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= github.com/aliyun/alibaba-cloud-sdk-go v1.61.18/go.mod h1:v8ESoHo4SyHmuB4b1tJqDHxfTGEciD+yhvOU/5s1Rfk= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1704/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/aliyun/aliyun-log-go-sdk v0.1.54 h1:ejQygZTGBqTs4V9qQUunWYtFwyKUWXYryfgrX9OhOlg= @@ -235,6 +244,7 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= @@ -282,12 +292,15 @@ github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= 
+github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= github.com/bytedance/gopkg v0.0.0-20220413063733-65bf48ffb3a7/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q= github.com/bytedance/gopkg v0.0.0-20220817015305-b879a72dc90f h1:U3Bk6S9UyqFM5tU3bZ3pwqx5xyypHP7Bm2QCbOUwxSc= github.com/bytedance/gopkg v0.0.0-20220817015305-b879a72dc90f/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q= @@ -342,6 +355,7 @@ github.com/cloudwego/frugal v0.1.8/go.mod h1:F0mLIWHymuQgh6r8N0owTA/ARv1B4SOiKa8 github.com/cloudwego/kitex v0.5.0 h1:f/rip2gp8mdeTpi0WQFv7BdDdkdZn/Q0KvBCm9Mi+7c= github.com/cloudwego/kitex v0.5.0/go.mod h1:yhw7XikNVG4RstmlQAidBuxMlZYpIiCLsDU8eHPGEMo= github.com/cloudwego/kitex-examples v0.1.1 h1:5uGqbGEobl8pKSVKwaWgltuf/JAa8Fg2MioX4WmlCXw= +github.com/cloudwego/kitex-examples v0.1.1/go.mod h1:5V7LsSJtY18KnceJdvpxYswOfgV3kXE0BGm5mRYyuAg= github.com/cloudwego/netpoll v0.3.2 h1:/998ICrNMVBo4mlul4j7qcIeY7QnEfuCCPPwck9S3X4= github.com/cloudwego/netpoll v0.3.2/go.mod h1:xVefXptcyheopwNDZjDPcfU6kIjZXZ4nY550k1yH9eQ= github.com/cloudwego/thriftgo v0.2.8/go.mod h1:dAyXHEmKXo0LfMCrblVEY3mUZsdeuA5+i0vF5f09j7E= @@ -391,8 +405,8 @@ github.com/dancannon/gorethink v4.0.0+incompatible h1:KFV7Gha3AuqT+gr0B/eKvGhbjm github.com/dancannon/gorethink v4.0.0+incompatible/go.mod h1:BLvkat9KmZc1efyYwhz3WnybhRZtgF1K929FD8z1avU= github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= -github.com/dapr/components-contrib v1.12.1-0.20231102232056-f4e73b0e6511 h1:vn9QQj3kbLgoB7f+TmER7BVuBn6Q8jHjQjkPmY9LemA= -github.com/dapr/components-contrib v1.12.1-0.20231102232056-f4e73b0e6511/go.mod h1:54ef0yk4j8mvYjKQmRtxJFxyYc+kJnErRCeMeiqDJRM= +github.com/dapr/components-contrib v1.12.1-0.20231129042434-36a055ebd8d7 h1:f0n7cuFO2OTdeLwNDsdNs7eLwfIuhKCXi/Aeh/IbYXA= +github.com/dapr/components-contrib v1.12.1-0.20231129042434-36a055ebd8d7/go.mod h1:s4vy1EFMh/9xMoeD0FvIH2D1LI3BHutzRwT1yKxGD+c= github.com/dapr/kit v0.12.2-0.20231031211530-0e1fd37fc4b3 h1:xsmVK3YOKRMOcaxqo50Ce0apQzq+LzAfWuFapQuu8Ro= github.com/dapr/kit v0.12.2-0.20231031211530-0e1fd37fc4b3/go.mod h1:c3Z78F+h7UYtb0LmpzJNC/ChT240ycDJFViRUztdpoo= github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -409,7 +423,9 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3 github.com/deepmap/oapi-codegen v1.11.0 h1:f/X2NdIkaBKsSdpeuwLnY/vDI0AtPUrmB5LMgc7YD+A= github.com/deepmap/oapi-codegen v1.11.0/go.mod h1:k+ujhoQGxmQYBZBbxhOZNZf4j08qv5mC+OH+fFTnKxM= github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= @@ -421,6 +437,7 @@ github.com/didip/tollbooth/v7 v7.0.1/go.mod h1:VZhDSGl5bDSPj4wPsih3PFa4Uh9Ghv8hg github.com/dimfeld/httptreemux v5.0.1+incompatible 
h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dubbogo/go-zookeeper v1.0.3/go.mod h1:fn6n2CAEer3novYgk9ULLwAjuV8/g4DdC2ENwRb6E+c= github.com/dubbogo/go-zookeeper v1.0.4-0.20211212162352-f9d2183d89d5/go.mod h1:fn6n2CAEer3novYgk9ULLwAjuV8/g4DdC2ENwRb6E+c= @@ -454,6 +471,7 @@ github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQ github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -488,23 +506,28 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= 
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/getkin/kin-openapi v0.94.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac1BUPam4aw6Q= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -533,6 +556,7 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -560,6 +584,7 @@ github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -638,6 +663,7 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -714,6 +740,7 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -722,6 
+749,7 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -802,6 +830,7 @@ github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUU github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -855,6 +884,7 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -901,6 +931,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1: github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -947,6 +978,7 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -974,7 +1006,9 @@ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/kataras/go-errors v0.0.3 h1:RQSGEb5AHjsGbwhNW8mFC7a9JrgoCLHC8CBQ4keXJYU= +github.com/kataras/go-errors v0.0.3/go.mod h1:K3ncz8UzwI3bpuksXt5tQLmrRlgxfv+52ARvAu1+I+o= github.com/kataras/go-serializer v0.0.4 h1:isugggrY3DSac67duzQ/tn31mGAUtYqNpE2ob6Xt/SY= +github.com/kataras/go-serializer v0.0.4/go.mod h1:/EyLBhXKQOJ12dZwpUZZje3lGy+3wnvG7QKaVJtm/no= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= @@ -1000,6 +1034,7 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -1063,6 +1098,7 @@ github.com/matoous/go-nanoid v1.5.0/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6U github.com/matoous/go-nanoid/v2 v2.0.0 h1:d19kur2QuLeHmJBkvYkFdhFBzLoo1XVm2GgTpL+9Tj0= github.com/matoous/go-nanoid/v2 v2.0.0/go.mod h1:FtS4aGPVfEkxKxhdWPAspZpZSh1cOjtM7Ej/So3hR0g= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -1085,10 +1121,12 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/microcosm-cc/bluemonday v1.0.24 h1:NGQoPtwGVcbGkKfvyYk1yRqknzBuoMiUrO6R7uFTPlw= +github.com/microcosm-cc/bluemonday v1.0.24/go.mod 
h1:ArQySAMps0790cHSkdPEJ7bGkF2VePWH773hsJNSHf8= github.com/microsoft/durabletask-go v0.3.1 h1:Y7RrPefd4cz5GMxjMx/Zvf9r5INombNlzI0DaQd994k= github.com/microsoft/durabletask-go v0.3.1/go.mod h1:t3u0iRvIadT1y4MD5cUG0mbTOqgANT6IFcLogv7o0M0= github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= @@ -1100,6 +1138,7 @@ github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJys github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -1140,6 +1179,7 @@ github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mrz1836/postmark v1.6.1 h1:UHAs9WuZEBZj12MdZ/iVRyoC4tq3ODTdYhE17OhJeJ4= github.com/mrz1836/postmark v1.6.1/go.mod h1:6z5MxAH00Kj44owtQaryv9Pbqp5OKT3wWcRSydB0p0A= github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= @@ -1158,9 +1198,11 @@ github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6Yf github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/jwt/v2 v2.4.1 h1:Y35W1dgbbz2SQUYDPCaclXcuqleVmpbRa7646Jf2EX4= +github.com/nats-io/jwt/v2 v2.5.0 h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak= +github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats-server/v2 v2.9.21 h1:2TBTh0UDE74eNXQmV4HofsmRSCiVN0TH2Wgrp6BD6fk= +github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ1wbikExU= +github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c= github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc= @@ -1241,6 +1283,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pashagolub/pgxmock/v2 v2.12.0 h1:IVRmQtVFNCoq7NOZ+PdfvB6fwnLJmEuWDhnc3yrDxBs= +github.com/pashagolub/pgxmock/v2 v2.12.0/go.mod h1:D3YslkN/nJ4+umVqWmbwfSXugJIjPMChkGBG47OJpNw= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= 
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -1249,6 +1292,7 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= +github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1283,6 +1327,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -1360,10 +1405,12 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= @@ -1382,6 +1429,7 @@ github.com/sendgrid/rest v2.6.9+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV github.com/sendgrid/sendgrid-go v3.13.0+incompatible h1:HZrzc06/QfBGesY9o3n1lvBrRONA+57rbDRKet7plos= github.com/sendgrid/sendgrid-go v3.13.0+incompatible/go.mod h1:QRQt+LX/NmgVEvmdRw0VT/QgUn499+iza2FnDca9fg8= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil 
v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.21.6/go.mod h1:JfVbDpIBLVzT8oKbvMg9P3wEIMDDpVn+LwHTKj0ST88= github.com/shirou/gopsutil/v3 v3.22.2 h1:wCrArWFkHYIdDxx/FSfF5RB4dpJYW6t7rcp3+zL8uks= @@ -1417,6 +1465,7 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -1426,6 +1475,7 @@ github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1433,11 +1483,13 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/spiffe/go-spiffe/v2 v2.1.6 h1:4SdizuQieFyL9eNU+SPiCArH4kynzaKOOj0VvM8R7Xo= github.com/spiffe/go-spiffe/v2 v2.1.6/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= github.com/stealthrocket/wasi-go v0.8.1-0.20230912180546-8efbab50fb58 h1:mTC4gyv3lcJ1XpzZMAckqkvWUqeT5Bva4RAT1IoHAAA= github.com/stealthrocket/wasi-go v0.8.1-0.20230912180546-8efbab50fb58/go.mod h1:ZAYCOqLJkc9P6fcq14TV4cf+gJ2fHthp9kCGxBViagE= github.com/stealthrocket/wazergo v0.19.1 h1:BPrITETPgSFwiytwmToO0MbUC/+RGC39JScz1JmmG6c= +github.com/stealthrocket/wazergo v0.19.1/go.mod h1:riI0hxw4ndZA5e6z7PesHg2BtTftcZaMxRcoiGGipTs= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1465,6 +1517,7 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/supplyon/gremcos v0.1.40 
h1:OFJw3MV44HNE9N6SKYK0zRBbEwyugyyjjqeXiGi5E3w= github.com/supplyon/gremcos v0.1.40/go.mod h1:LI6lxKObicSoIw1N04rHyjz9tGSaevM6Ydbo3XfyZfA= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= @@ -1538,12 +1591,15 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1594,6 +1650,7 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= @@ -1656,6 +1713,7 @@ go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= goji.io v2.0.2+incompatible h1:uIssv/elbKRLznFUy3Xj4+2Mz/qKhek/9aZQDUMae7c= +goji.io v2.0.2+incompatible/go.mod h1:sbqFwrtqZACxLBTQcdgVjFh54yGVCvwq8+w49MVMMIk= golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= golang.org/x/arch v0.2.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= @@ -2202,6 +2260,7 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc/examples 
v0.0.0-20230224211313-3775f633ce20 h1:MLBCGN1O7GzIx+cBiwfYPwtmZ41U3Mn/cotLJciaArI= +google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2237,6 +2296,7 @@ gopkg.in/couchbase/gocb.v1 v1.6.7/go.mod h1:Ri5Qok4ZKiwmPr75YxZ0uELQy45XJgUSzeUn gopkg.in/couchbase/gocbcore.v7 v7.1.18 h1:d4yfIXWdf/ZmyuJjwRVVlGT/yqx8ICy6fcT/ViaMZsI= gopkg.in/couchbase/gocbcore.v7 v7.1.18/go.mod h1:48d2Be0MxRtsyuvn+mWzqmoGUG9uA00ghopzOs148/E= gopkg.in/couchbaselabs/gojcbmock.v1 v1.0.4 h1:r5WoWGyeTJQiNGsoWAsMJfz0JFF14xc2TJrYSs09VXk= +gopkg.in/couchbaselabs/gojcbmock.v1 v1.0.4/go.mod h1:jl/gd/aQ2S8whKVSTnsPs6n7BPeaAuw9UglBD/OF7eo= gopkg.in/couchbaselabs/jsonx.v1 v1.0.1 h1:giDAdTGcyXUuY+uFCWeJ2foukiqMTYl4ORSxCi/ybcc= gopkg.in/couchbaselabs/jsonx.v1 v1.0.1/go.mod h1:oR201IRovxvLW/eISevH12/+MiKHtNQAKfcX8iWZvJY= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -2257,6 +2317,7 @@ gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/kataras/go-serializer.v0 v0.0.4 h1:mVy3gjU4zZZBe+8JbZDRTMPJdrB0lzBNsLLREBcKGgU= +gopkg.in/kataras/go-serializer.v0 v0.0.4/go.mod h1:v2jHg/3Wp7uncDNzenTsX75PRDxhzlxoo/qDvM4ZGxk= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -2325,7 +2386,9 @@ modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0= modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/libc v1.29.0 h1:tTFRFq69YKCF2QyGNuRUQxKBm1uZZLubf6Cjh/pVHXs= modernc.org/libc v1.29.0/go.mod h1:DaG/4Q3LRRdqpiLyP0C2m1B8ZMGkQ+cCgOIjEtQlYhQ= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= @@ -2339,10 +2402,13 @@ modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0 modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= +modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= +modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE= nhooyr.io/websocket v1.8.7 
h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/pkg/grpc/api.go b/pkg/grpc/api.go index c4b7e991afd..50523149a71 100644 --- a/pkg/grpc/api.go +++ b/pkg/grpc/api.go @@ -45,6 +45,7 @@ import ( diagUtils "github.com/dapr/dapr/pkg/diagnostics/utils" "github.com/dapr/dapr/pkg/encryption" "github.com/dapr/dapr/pkg/grpc/metadata" + "github.com/dapr/dapr/pkg/grpc/proxy/codec" "github.com/dapr/dapr/pkg/grpc/universalapi" "github.com/dapr/dapr/pkg/messages" invokev1 "github.com/dapr/dapr/pkg/messaging/v1" @@ -102,6 +103,8 @@ type APIOpts struct { func NewAPI(opts APIOpts) API { opts.UniversalAPI.InitUniversalAPI() + codec.Register() + return &api{ UniversalAPI: opts.UniversalAPI, directMessaging: opts.DirectMessaging, diff --git a/pkg/grpc/proxy/codec/codec.go b/pkg/grpc/proxy/codec/codec.go index 8bb161b3c36..5af597dc7c0 100644 --- a/pkg/grpc/proxy/codec/codec.go +++ b/pkg/grpc/proxy/codec/codec.go @@ -15,10 +15,6 @@ import ( // We have to say that we are the "proto" codec otherwise marshaling will fail. const Name = "proto" -func init() { - Register() -} - // Register manually registers the codec. func Register() { encoding.RegisterCodec(codec()) diff --git a/pkg/grpc/proxy/handler_test.go b/pkg/grpc/proxy/handler_test.go index 9af60409a93..6cb57c14892 100644 --- a/pkg/grpc/proxy/handler_test.go +++ b/pkg/grpc/proxy/handler_test.go @@ -861,6 +861,7 @@ func (s *proxyTestSuite) TearDownSuite() { } func TestProxySuite(t *testing.T) { + codec.Register() suite.Run(t, &proxyTestSuite{}) } diff --git a/tests/apps/actorload/Dockerfile b/tests/apps/actorload/Dockerfile index bf88e8ce7ac..b8324f54cc2 100644 --- a/tests/apps/actorload/Dockerfile +++ b/tests/apps/actorload/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20 +FROM golang:1.21 WORKDIR /actorload/ COPY . . RUN make build diff --git a/tests/apps/actorload/go.mod b/tests/apps/actorload/go.mod index e3656e09fd4..94ca00e54fb 100644 --- a/tests/apps/actorload/go.mod +++ b/tests/apps/actorload/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/actorload -go 1.20 +go 1.21 require ( fortio.org/fortio v1.6.8 diff --git a/tests/apps/perf/actor-activation-locker/Dockerfile b/tests/apps/perf/actor-activation-locker/Dockerfile index ac5fa00e5e1..1c4fe95f8e2 100644 --- a/tests/apps/perf/actor-activation-locker/Dockerfile +++ b/tests/apps/perf/actor-activation-locker/Dockerfile @@ -11,7 +11,7 @@ # limitations under the License. 
# -FROM golang:1.20 as build_env +FROM golang:1.21 as build_env ENV CGO_ENABLED=0 WORKDIR /app diff --git a/tests/apps/perf/actor-activation-locker/go.mod b/tests/apps/perf/actor-activation-locker/go.mod index 668902935a6..1ec6baec773 100644 --- a/tests/apps/perf/actor-activation-locker/go.mod +++ b/tests/apps/perf/actor-activation-locker/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/perf/actor-activation-locker -go 1.20 +go 1.21 require ( github.com/bsm/redislock v0.8.2 diff --git a/tests/apps/perf/actorfeatures/Dockerfile b/tests/apps/perf/actorfeatures/Dockerfile index 23bab85cc84..49e5cde740f 100644 --- a/tests/apps/perf/actorfeatures/Dockerfile +++ b/tests/apps/perf/actorfeatures/Dockerfile @@ -1,11 +1,11 @@ -FROM golang:1.20-bullseye as build_env +FROM golang:1.21-bullseye as build_env ENV CGO_ENABLED=0 WORKDIR /app COPY *.go go.mod go.sum ./ RUN go get -d -v && go build -o tester . -FROM golang:1.20-bullseye as fortio_build_env +FROM golang:1.21-bullseye as fortio_build_env WORKDIR /fortio ADD "https://api.github.com/repos/dapr/fortio/branches/v1.38.4-dapr" skipcache diff --git a/tests/apps/perf/actorfeatures/go.mod b/tests/apps/perf/actorfeatures/go.mod index 299983904c1..8b34598f2c5 100644 --- a/tests/apps/perf/actorfeatures/go.mod +++ b/tests/apps/perf/actorfeatures/go.mod @@ -1,5 +1,5 @@ module github.com/dapr/dapr/tests/apps/perf/tester -go 1.20 +go 1.21 require github.com/go-chi/chi/v5 v5.0.10 diff --git a/tests/apps/perf/k6-custom/Dockerfile b/tests/apps/perf/k6-custom/Dockerfile index 19df132079e..911d052645b 100644 --- a/tests/apps/perf/k6-custom/Dockerfile +++ b/tests/apps/perf/k6-custom/Dockerfile @@ -1,5 +1,5 @@ # Build the k6 binary with the extension -FROM golang:1.20 as builder +FROM golang:1.21 as builder RUN go install go.k6.io/xk6/cmd/xk6@latest RUN xk6 build --output /k6 --with github.com/grafana/xk6-output-prometheus-remote@v0.1.0 --with github.com/grafana/xk6-disruptor@350f53204c65040e71757f98a330665a8f189f91 diff --git a/tests/apps/perf/service_invocation_grpc/Dockerfile b/tests/apps/perf/service_invocation_grpc/Dockerfile index 464c4dc5880..786062fdd54 100644 --- a/tests/apps/perf/service_invocation_grpc/Dockerfile +++ b/tests/apps/perf/service_invocation_grpc/Dockerfile @@ -11,7 +11,7 @@ # limitations under the License. # -FROM golang:1.20 as build_env +FROM golang:1.21 as build_env ENV CGO_ENABLED=0 WORKDIR /app diff --git a/tests/apps/perf/service_invocation_grpc/go.mod b/tests/apps/perf/service_invocation_grpc/go.mod index b0fc6d49cd2..0078c9194a5 100644 --- a/tests/apps/perf/service_invocation_grpc/go.mod +++ b/tests/apps/perf/service_invocation_grpc/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/perf/service_invocation_grpc -go 1.20 +go 1.21 require github.com/dapr/go-sdk v1.8.0 diff --git a/tests/apps/perf/service_invocation_http/Dockerfile b/tests/apps/perf/service_invocation_http/Dockerfile index 8205b965317..412da691911 100644 --- a/tests/apps/perf/service_invocation_http/Dockerfile +++ b/tests/apps/perf/service_invocation_http/Dockerfile @@ -11,7 +11,7 @@ # limitations under the License. 
# -FROM golang:1.20 as build_env +FROM golang:1.21 as build_env ENV CGO_ENABLED=0 WORKDIR /app diff --git a/tests/apps/perf/service_invocation_http/go.mod b/tests/apps/perf/service_invocation_http/go.mod index a6146e21407..726320b37be 100644 --- a/tests/apps/perf/service_invocation_http/go.mod +++ b/tests/apps/perf/service_invocation_http/go.mod @@ -1,3 +1,3 @@ module github.com/dapr/dapr/tests/apps/perf/service_invocation_http -go 1.20 +go 1.21 diff --git a/tests/apps/perf/tester/Dockerfile b/tests/apps/perf/tester/Dockerfile index 3a0043aa4dd..884b0679736 100644 --- a/tests/apps/perf/tester/Dockerfile +++ b/tests/apps/perf/tester/Dockerfile @@ -1,11 +1,11 @@ -FROM golang:1.20-bullseye as build_env +FROM golang:1.21-bullseye as build_env ENV CGO_ENABLED=0 WORKDIR /app COPY *.go go.mod ./ RUN go get -d -v && go build -o tester . -FROM golang:1.20-bullseye as fortio_build_env +FROM golang:1.21-bullseye as fortio_build_env WORKDIR /fortio ADD "https://api.github.com/repos/dapr/fortio/branches/v1.38.4-dapr" skipcache diff --git a/tests/apps/perf/tester/go.mod b/tests/apps/perf/tester/go.mod index 163c2d68019..711fcbb75ee 100644 --- a/tests/apps/perf/tester/go.mod +++ b/tests/apps/perf/tester/go.mod @@ -1,3 +1,3 @@ module github.com/dapr/dapr/tests/apps/perf/tester -go 1.20 +go 1.21 diff --git a/tests/apps/pluggable_kafka-bindings/go.mod b/tests/apps/pluggable_kafka-bindings/go.mod index c26784a95c4..ea4a2a37005 100644 --- a/tests/apps/pluggable_kafka-bindings/go.mod +++ b/tests/apps/pluggable_kafka-bindings/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/kafka-bindings -go 1.20 +go 1.21 require ( github.com/dapr-sandbox/components-go-sdk v0.0.0-20221213200551-bd485eb929ff diff --git a/tests/apps/pluggable_redis-pubsub/go.mod b/tests/apps/pluggable_redis-pubsub/go.mod index dc06a05197d..91c0338d4af 100644 --- a/tests/apps/pluggable_redis-pubsub/go.mod +++ b/tests/apps/pluggable_redis-pubsub/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/pluggable_redis-pubsub -go 1.20 +go 1.21 require ( github.com/dapr-sandbox/components-go-sdk v0.0.0-20221213200551-bd485eb929ff diff --git a/tests/apps/pluggable_redis-statestore/go.mod b/tests/apps/pluggable_redis-statestore/go.mod index 4cf4a814034..67f419046f4 100644 --- a/tests/apps/pluggable_redis-statestore/go.mod +++ b/tests/apps/pluggable_redis-statestore/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/pluggable_redis-statestore -go 1.20 +go 1.21 require ( github.com/dapr-sandbox/components-go-sdk v0.0.0-20221213200551-bd485eb929ff diff --git a/tests/apps/resiliencyapp/go.mod b/tests/apps/resiliencyapp/go.mod index ee0c207b336..2b57a91208e 100644 --- a/tests/apps/resiliencyapp/go.mod +++ b/tests/apps/resiliencyapp/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/resiliencyapp -go 1.20 +go 1.21 require ( github.com/dapr/dapr v0.0.0 diff --git a/tests/apps/resiliencyapp_grpc/go.mod b/tests/apps/resiliencyapp_grpc/go.mod index 0df2303ebf3..65fcd0a7930 100644 --- a/tests/apps/resiliencyapp_grpc/go.mod +++ b/tests/apps/resiliencyapp_grpc/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/resiliencyapp_grpc -go 1.20 +go 1.21 require ( github.com/dapr/dapr v1.7.4 diff --git a/tests/apps/service_invocation_grpc_proxy_client/go.mod b/tests/apps/service_invocation_grpc_proxy_client/go.mod index c30287eb0da..8ab0de0810c 100644 --- a/tests/apps/service_invocation_grpc_proxy_client/go.mod +++ b/tests/apps/service_invocation_grpc_proxy_client/go.mod @@ -1,6 +1,6 @@ module 
github.com/dapr/dapr/tests/apps/service_invocation_grpc_proxy_client -go 1.20 +go 1.21 require ( github.com/dapr/dapr v0.0.0-00010101000000-000000000000 diff --git a/tests/apps/service_invocation_grpc_proxy_server/go.mod b/tests/apps/service_invocation_grpc_proxy_server/go.mod index 290bc4c55cc..eebcdd79259 100644 --- a/tests/apps/service_invocation_grpc_proxy_server/go.mod +++ b/tests/apps/service_invocation_grpc_proxy_server/go.mod @@ -1,6 +1,6 @@ module github.com/dapr/dapr/tests/apps/service_invocation_grpc_proxy_server -go 1.20 +go 1.21 require ( google.golang.org/grpc v1.54.0 diff --git a/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go b/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go index dd3255cccea..22bd10537a6 100644 --- a/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go +++ b/tests/integration/suite/daprd/serviceinvocation/http/httpendpoints.go @@ -245,7 +245,7 @@ func (h *httpendpoints) Run(t *testing.T, ctx context.Context) { t.Run("bad PKI", func(t *testing.T) { invokeTests(t, http.StatusInternalServerError, func(t *testing.T, body string) { assert.Contains(t, body, `"errorCode":"ERR_DIRECT_INVOKE"`) - assert.Contains(t, body, "tls: bad certificate") + assert.Contains(t, body, "tls: unknown certificate authority") }, h.daprd2) }) } From 328da65095f45bcdc45e348d36b163c5aed4a918 Mon Sep 17 00:00:00 2001 From: joshvanl Date: Fri, 1 Dec 2023 01:02:39 +0000 Subject: [PATCH 2/4] Lint code This change updates the code to pass golangci-lint v1.55.2: test error assertions move from testify assert to require, direct protobuf field access is replaced with the generated getter methods, and the depguard configuration is migrated to its new rules-based format. Signed-off-by: joshvanl --- .golangci.yml | 81 ++-- pkg/acl/acl_test.go | 46 +-- pkg/actors/actor_lock_test.go | 9 +- pkg/actors/actor_test.go | 6 +- pkg/actors/actors.go | 18 +- pkg/actors/actors_test.go | 68 ++-- pkg/actors/config_test.go | 2 +- pkg/actors/errors/actor_error.go | 2 +- pkg/actors/internal/reminder_test.go | 4 +- pkg/actors/internal_actor_test.go | 36 +- pkg/actors/placement/client_test.go | 10 +- pkg/actors/placement/placement.go | 28 +- pkg/actors/placement/placement_test.go | 14 +- pkg/actors/reminders/reminders_test.go | 144 +++--- pkg/actors/timers/timers_test.go | 18 +- pkg/apis/configuration/v1alpha1/types.go | 8 +- pkg/apis/resiliency/v1alpha1/types.go | 42 +- pkg/apphealth/health_test.go | 6 +- pkg/channel/grpc/grpc_channel.go | 6 +- pkg/channel/grpc/grpc_channel_test.go | 10 +- pkg/channel/http/http_channel.go | 16 +- pkg/channel/http/http_channel_test.go | 58 +-- .../testing/grpc_channel_server_mock.go | 14 +- pkg/components/bindings/input_pluggable.go | 12 +- .../bindings/input_pluggable_test.go | 2 +- pkg/components/bindings/output_pluggable.go | 6 +- .../bindings/output_pluggable_test.go | 4 +- pkg/components/bindings/registry_test.go | 15 +- pkg/components/configuration/registry_test.go | 9 +- pkg/components/crypto/registry_test.go | 9 +- pkg/components/disk_manifest_loader_test.go | 6 +- pkg/components/kubernetes_loader_test.go | 7 +- pkg/components/local_loader_test.go | 15 +- pkg/components/lock/lock_config_test.go | 4 +- pkg/components/lock/registry_test.go | 4 +- .../middleware/http/registry_test.go | 17 +- .../nameresolution/registry_test.go | 9 +- pkg/components/pluggable/discovery_test.go | 16 +- pkg/components/pluggable/errors_test.go | 6 +- pkg/components/pubsub/pluggable.go | 28 +- pkg/components/pubsub/pluggable_test.go | 12 +- pkg/components/pubsub/registry_test.go | 9 +- pkg/components/secretstores/pluggable.go | 6 +- pkg/components/secretstores/pluggable_test.go | 4 +- pkg/components/secretstores/registry_test.go | 9 +- pkg/components/state/pluggable.go | 32 +-
pkg/components/state/pluggable_test.go | 110 +++--- pkg/components/state/registry_test.go | 23 +- pkg/components/state/state_config_test.go | 12 +- pkg/config/configuration.go | 86 ++--- pkg/config/configuration_test.go | 16 +- pkg/diagnostics/component_monitoring_test.go | 20 +- pkg/diagnostics/grpc_monitoring_test.go | 36 +- pkg/diagnostics/grpc_tracing.go | 8 +- pkg/diagnostics/grpc_tracing_test.go | 54 +-- pkg/diagnostics/http_monitoring_test.go | 16 +- pkg/diagnostics/http_tracing_test.go | 19 +- pkg/diagnostics/resiliency_monitoring_test.go | 8 +- pkg/diagnostics/service_monitoring_test.go | 2 +- pkg/diagnostics/tracing_test.go | 11 +- pkg/diagnostics/utils/metrics_utils_test.go | 2 +- pkg/encryption/encryption_test.go | 21 +- pkg/encryption/state_test.go | 19 +- pkg/expr/expr_test.go | 4 +- pkg/grpc/api.go | 358 +++++++++--------- pkg/grpc/api_actor_test.go | 11 +- pkg/grpc/api_crypto.go | 30 +- pkg/grpc/api_crypto_test.go | 8 +- pkg/grpc/api_daprinternal.go | 26 +- pkg/grpc/api_daprinternal_test.go | 8 +- pkg/grpc/api_test.go | 244 ++++++------ pkg/grpc/endpoints_test.go | 13 +- pkg/grpc/manager/pool_test.go | 8 +- pkg/grpc/proxy/codec/codec_test.go | 8 +- pkg/grpc/proxy/handler_test.go | 128 +++---- pkg/grpc/server_test.go | 12 +- pkg/grpc/universalapi/api_lock.go | 22 +- pkg/grpc/universalapi/api_metadata.go | 6 +- pkg/grpc/universalapi/api_metadata_test.go | 2 +- pkg/grpc/universalapi/api_secrets.go | 30 +- pkg/grpc/universalapi/api_secrets_test.go | 24 +- pkg/grpc/universalapi/api_shutdown_test.go | 4 +- pkg/grpc/universalapi/api_state_query.go | 16 +- .../api_subtlecrypto_subtlecrypto_test.go | 64 ++-- pkg/grpc/universalapi/api_workflow.go | 76 ++-- pkg/grpc/universalapi/api_workflow_test.go | 38 +- pkg/health/health_test.go | 20 +- pkg/http/api.go | 28 +- pkg/http/api_directmessaging.go | 8 +- pkg/http/api_directmessaging_test.go | 12 +- pkg/http/api_metadata.go | 56 +-- pkg/http/api_secrets.go | 12 +- pkg/http/api_test.go | 80 ++-- pkg/http/api_workflow.go | 2 +- pkg/http/server_test.go | 8 +- pkg/http/universalapi_test.go | 4 +- pkg/http/util_test.go | 27 +- .../namenamespacematcher_test.go | 12 +- pkg/injector/patcher/sidecar.go | 38 +- .../patcher/sidecar_container_test.go | 16 +- pkg/injector/patcher/sidecar_patcher_test.go | 4 +- pkg/injector/patcher/sidecar_volumes_test.go | 4 +- pkg/injector/sentry/sentry.go | 2 +- pkg/injector/service/config.go | 4 +- pkg/injector/service/config_test.go | 11 +- pkg/injector/service/handler_test.go | 11 +- pkg/injector/service/injector_test.go | 30 +- pkg/messaging/direct_messaging.go | 30 +- pkg/messaging/direct_messaging_test.go | 44 +-- pkg/messaging/grpc_proxy_test.go | 17 +- pkg/messaging/v1/invoke_method_request.go | 32 +- .../v1/invoke_method_request_test.go | 202 +++++----- pkg/messaging/v1/invoke_method_response.go | 26 +- .../v1/invoke_method_response_test.go | 196 +++++----- pkg/messaging/v1/replayable_request_test.go | 44 +-- pkg/messaging/v1/util.go | 30 +- pkg/messaging/v1/util_test.go | 36 +- pkg/metrics/exporter_test.go | 5 +- pkg/nethttpadaptor/nethttpadaptor_test.go | 2 +- pkg/operator/api/api.go | 54 +-- pkg/operator/api/api_test.go | 107 +++--- pkg/operator/handlers/dapr_handler_test.go | 20 +- pkg/placement/ha_test.go | 2 +- pkg/placement/hashing/consistent_hash_test.go | 9 +- pkg/placement/membership.go | 8 +- pkg/placement/membership_test.go | 32 +- pkg/placement/placement.go | 32 +- pkg/placement/placement_test.go | 18 +- pkg/placement/raft/fsm.go | 10 +- pkg/placement/raft/fsm_test.go | 23 +- 
pkg/placement/raft/snapshot_test.go | 5 +- pkg/placement/raft/state_test.go | 40 +- pkg/placement/raft/util_test.go | 21 +- pkg/resiliency/breaker/circuitbreaker_test.go | 4 +- pkg/resiliency/policy_test.go | 9 +- pkg/resiliency/resiliency_test.go | 14 +- pkg/responsewriter/response_writer_test.go | 72 ++-- pkg/runtime/channels/channels_test.go | 10 +- pkg/runtime/config_test.go | 8 +- pkg/runtime/meta/meta_test.go | 29 +- pkg/runtime/processor/binding/init_test.go | 12 +- pkg/runtime/processor/binding/send.go | 24 +- pkg/runtime/processor/binding/send_test.go | 32 +- pkg/runtime/processor/processor_test.go | 27 +- .../processor/pubsub/bulk_subscriber.go | 34 +- .../processor/pubsub/bulk_subscriber_test.go | 205 +++++----- .../pubsub/bulksub_resiliency_test.go | 107 +++--- pkg/runtime/processor/pubsub/publish.go | 8 +- pkg/runtime/processor/pubsub/publish_test.go | 38 +- pkg/runtime/processor/pubsub/pubsub.go | 4 +- pkg/runtime/processor/pubsub/pubsub_test.go | 128 +++---- pkg/runtime/processor/secret/secret_test.go | 5 +- pkg/runtime/processor/state/state_test.go | 9 +- .../pubsub/bulkpublish_resiliency_test.go | 43 ++- pkg/runtime/pubsub/cloudevents_test.go | 9 +- pkg/runtime/pubsub/default_bulkpub_test.go | 7 +- pkg/runtime/pubsub/default_bulksub_test.go | 7 +- pkg/runtime/pubsub/outbox_test.go | 42 +- pkg/runtime/pubsub/subscriptions.go | 36 +- pkg/runtime/pubsub/subscriptions_test.go | 69 ++-- pkg/runtime/runtime_test.go | 106 +++--- pkg/runtime/wfengine/activity.go | 2 +- pkg/runtime/wfengine/backend.go | 2 +- pkg/runtime/wfengine/component.go | 4 +- pkg/runtime/wfengine/wfengine_test.go | 140 ++++--- pkg/runtime/wfengine/workflow.go | 24 +- pkg/runtime/wfengine/workflowstate_test.go | 12 +- pkg/security/legacy/legacy_test.go | 24 +- pkg/security/security_test.go | 6 +- pkg/security/x509source.go | 16 +- pkg/sentry/server/ca/ca_test.go | 8 +- pkg/sentry/server/ca/selfhosted_test.go | 14 +- pkg/sentry/server/server.go | 38 +- pkg/sentry/server/server_test.go | 10 +- .../server/validator/internal/common.go | 8 +- pkg/sentry/server/validator/jwks/jwks.go | 6 +- .../server/validator/kubernetes/kubernetes.go | 16 +- pkg/testing/directmessaging_mock.go | 6 +- pkg/testing/grpc/server.go | 5 +- pkg/validation/validation_test.go | 17 +- tests/apps/binding_input_grpc/app.go | 10 +- tests/apps/binding_output/app.go | 2 +- tests/apps/configurationapp/app.go | 4 +- tests/apps/healthapp/app.go | 18 +- tests/apps/perf/configuration/app.go | 2 +- tests/apps/pubsub-publisher/app.go | 8 +- .../pubsub-subscriber-routing_grpc/app.go | 18 +- tests/apps/pubsub-subscriber_grpc/app.go | 102 ++--- tests/apps/service_invocation/app.go | 12 +- tests/apps/service_invocation_grpc/app.go | 6 +- tests/apps/stateapp/app.go | 50 +-- .../e2e/actor_features/actor_features_test.go | 2 +- .../actor_invocation/actor_invocation_test.go | 4 +- .../actor_reentrancy/actor_reentrancy_test.go | 2 +- tests/e2e/actor_state/actor_state_test.go | 58 +-- tests/e2e/workflows/workflow_test.go | 18 +- tests/integration/framework/binary/binary.go | 5 +- .../framework/iowriter/iowriter_test.go | 18 +- .../framework/process/daprd/daprd.go | 20 +- .../framework/process/exec/exec.go | 8 +- .../framework/process/exec/kill/kill_posix.go | 4 +- .../framework/process/grpc/grpc.go | 5 + .../framework/process/operator/operator.go | 4 + .../framework/process/placement/placement.go | 4 + .../integration/framework/process/process.go | 3 + .../framework/process/sentry/sentry.go | 6 +- .../framework/process/statestore/component.go | 98 ++--- 
tests/integration/suite/actors/grpc/ttl.go | 17 +- .../healthz/deactivate-on-placement-fail.go | 7 +- .../suite/actors/healthz/healthz.go | 4 +- .../suite/actors/healthz/initerror.go | 9 +- tests/integration/suite/actors/http/ttl.go | 14 +- .../suite/actors/metadata/client.go | 3 +- .../integration/suite/actors/metadata/host.go | 3 +- .../suite/actors/metadata/shared.go | 14 +- .../suite/actors/reminders/basic.go | 9 +- .../suite/actors/reminders/rebalancing.go | 4 +- .../suite/daprd/httpserver/httpserver.go | 2 +- .../suite/daprd/metrics/metrics.go | 4 +- .../suite/daprd/mtls/kubernetes/disable.go | 23 +- .../suite/daprd/mtls/kubernetes/enable.go | 21 +- .../suite/daprd/mtls/kubernetes/operator.go | 3 +- .../suite/daprd/mtls/standalone/disable.go | 19 +- .../suite/daprd/mtls/standalone/enable.go | 17 +- .../suite/daprd/outbox/grpc/basic.go | 5 +- .../suite/daprd/outbox/http/basic.go | 1 + .../suite/daprd/pluggable/basic.go | 46 +-- .../suite/daprd/pubsub/grpc/compname.go | 3 +- .../daprd/resiliency/apps/defaulttimeout.go | 2 +- .../suite/daprd/resources/uniquename.go | 3 +- .../daprd/serviceinvocation/grpc/basic.go | 30 +- .../daprd/serviceinvocation/grpc/fuzz.go | 4 +- .../serviceinvocation/grpc/slowappstartup.go | 7 +- .../daprd/serviceinvocation/http/basic.go | 10 +- .../suite/daprd/state/grpc/basic.go | 3 +- .../suite/daprd/state/grpc/compname.go | 6 +- .../suite/daprd/state/grpc/fuzz.go | 14 +- .../integration/suite/daprd/state/grpc/ttl.go | 7 +- .../integration/suite/daprd/state/http/ttl.go | 6 +- .../suite/placement/apilevel/shared.go | 10 +- .../integration/suite/placement/authz/mtls.go | 19 +- .../suite/placement/authz/nomtls.go | 12 +- .../suite/placement/quorum/insecure.go | 12 +- .../suite/placement/quorum/jwks.go | 14 +- .../suite/placement/quorum/notls.go | 11 +- tests/integration/suite/ports/operator.go | 4 +- .../suite/sentry/metrics/expiry.go | 2 +- .../sentry/validator/insecure/insecure.go | 8 +- .../suite/sentry/validator/jwks/jwks.go | 2 +- .../suite/sentry/validator/jwks/utils.go | 4 +- .../sentry/validator/kubernetes/kubernetes.go | 8 +- .../sentry/validator/kubernetes/longname.go | 2 +- tests/perf/test_params_test.go | 10 +- tests/platforms/kubernetes/appmanager_test.go | 39 +- .../kubernetes/daprcomponent_test.go | 14 +- tests/runner/loadtest/fortio_test.go | 23 +- tests/runner/loadtest/k6_client_test.go | 18 +- tests/runner/loadtest/k6_test.go | 32 +- tests/runner/testresource_test.go | 13 +- utils/host_test.go | 5 +- utils/resolvconf_test.go | 2 +- utils/utils_test.go | 21 +- 262 files changed, 3274 insertions(+), 3202 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1cc077040c4..e7fb5af6d36 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -121,34 +121,58 @@ linters-settings: # minimal occurrences count to trigger, 3 by default min-occurrences: 5 depguard: - list-type: denylist - include-go-root: false - packages-with-error-message: - - "github.com/Sirupsen/logrus": "must use github.com/dapr/kit/logger" - - "github.com/agrea/ptr": "must use github.com/dapr/kit/ptr" - - "go.uber.org/atomic": "must use sync/atomic" - - "golang.org/x/net/context": "must use context" - - "github.com/pkg/errors": "must use standard library (errors package and/or fmt.Errorf)" - - "github.com/go-chi/chi$": "must use github.com/go-chi/chi/v5" - - "github.com/cenkalti/backoff$": "must use github.com/cenkalti/backoff/v4" - - "github.com/cenkalti/backoff/v2": "must use github.com/cenkalti/backoff/v4" - - "github.com/cenkalti/backoff/v3": "must use 
github.com/cenkalti/backoff/v4" - - "github.com/benbjohnson/clock": "must use k8s.io/utils/clock" - - "github.com/ghodss/yaml": "must use sigs.k8s.io/yaml" - - "gopkg.in/yaml.v2": "must use gopkg.in/yaml.v3" - - "github.com/golang-jwt/jwt": "must use github.com/lestrrat-go/jwx/v2" - - "github.com/golang-jwt/jwt/v2": "must use github.com/lestrrat-go/jwx/v2" - - "github.com/golang-jwt/jwt/v3": "must use github.com/lestrrat-go/jwx/v2" - - "github.com/golang-jwt/jwt/v4": "must use github.com/lestrrat-go/jwx/v2" - # Commonly auto-completed by gopls - - "github.com/gogo/status": "must use google.golang.org/grpc/status" - - "github.com/gogo/protobuf": "must use google.golang.org/protobuf" - - "github.com/lestrrat-go/jwx/jwa": "must use github.com/lestrrat-go/jwx/v2" - - "github.com/lestrrat-go/jwx/jwt": "must use github.com/lestrrat-go/jwx/v2" - - "github.com/labstack/gommon/log": "must use github.com/dapr/kit/logger" - - "github.com/gobuffalo/logger": "must use github.com/dapr/kit/logger" - - "k8s.io/utils/pointer": "must use github.com/dapr/kit/ptr" - - "k8s.io/utils/ptr": "must use github.com/dapr/kit/ptr" + rules: + master: + deny: + - pkg: "github.com/Sirupsen/logrus" + desc: "must use github.com/dapr/kit/logger" + - pkg: "github.com/agrea/ptr" + desc: "must use github.com/dapr/kit/ptr" + - pkg: "go.uber.org/atomic" + desc: "must use sync/atomic" + - pkg: "golang.org/x/net/context" + desc: "must use context" + - pkg: "github.com/pkg/errors" + desc: "must use standard library (errors package and/or fmt.Errorf)" + - pkg: "github.com/go-chi/chi$" + desc: "must use github.com/go-chi/chi/v5" + - pkg: "github.com/cenkalti/backoff$" + desc: "must use github.com/cenkalti/backoff/v4" + - pkg: "github.com/cenkalti/backoff/v2" + desc: "must use github.com/cenkalti/backoff/v4" + - pkg: "github.com/cenkalti/backoff/v3" + desc: "must use github.com/cenkalti/backoff/v4" + - pkg: "github.com/benbjohnson/clock" + desc: "must use k8s.io/utils/clock" + - pkg: "github.com/ghodss/yaml" + desc: "must use sigs.k8s.io/yaml" + - pkg: "gopkg.in/yaml.v2" + desc: "must use gopkg.in/yaml.v3" + - pkg: "github.com/golang-jwt/jwt" + desc: "must use github.com/lestrrat-go/jwx/v2" + - pkg: "github.com/golang-jwt/jwt/v2" + desc: "must use github.com/lestrrat-go/jwx/v2" + - pkg: "github.com/golang-jwt/jwt/v3" + desc: "must use github.com/lestrrat-go/jwx/v2" + - pkg: "github.com/golang-jwt/jwt/v4" + desc: "must use github.com/lestrrat-go/jwx/v2" + # pkg: Commonly auto-completed by gopls + - pkg: "github.com/gogo/status" + desc: "must use google.golang.org/grpc/status" + - pkg: "github.com/gogo/protobuf" + desc: "must use google.golang.org/protobuf" + - pkg: "github.com/lestrrat-go/jwx/jwa" + desc: "must use github.com/lestrrat-go/jwx/v2" + - pkg: "github.com/lestrrat-go/jwx/jwt" + desc: "must use github.com/lestrrat-go/jwx/v2" + - pkg: "github.com/labstack/gommon/log" + desc: "must use github.com/dapr/kit/logger" + - pkg: "github.com/gobuffalo/logger" + desc: "must use github.com/dapr/kit/logger" + - pkg: "k8s.io/utils/pointer" + desc: "must use github.com/dapr/kit/ptr" + - pkg: "k8s.io/utils/ptr" + desc: "must use github.com/dapr/kit/ptr" misspell: # Correct spellings using locale preferences for US or UK. # Default is to use a neutral variety of English. 
@@ -305,3 +329,4 @@ linters: - structcheck - varcheck - deadcode + - inamedparam diff --git a/pkg/acl/acl_test.go b/pkg/acl/acl_test.go index 2fe75e83ebe..429407de1aa 100644 --- a/pkg/acl/acl_test.go +++ b/pkg/acl/acl_test.go @@ -299,7 +299,7 @@ func TestParseAccessControlSpec(t *testing.T) { } _, err := ParseAccessControlSpec(invalidAccessControlSpec, true) - assert.Error(t, err, "invalid access control spec. missing trustdomain for apps: [%s], missing namespace for apps: [%s], missing app name on at least one of the app policies: true", app1, app2) + require.Error(t, err, "invalid access control spec. missing trustdomain for apps: [%s], missing namespace for apps: [%s], missing app name on at least one of the app policies: true", app1, app2) }) t.Run("test when no trust domain is specified for the app", func(t *testing.T) { @@ -374,7 +374,7 @@ func TestParseAccessControlSpec(t *testing.T) { } accessControlList, _ := ParseAccessControlSpec(invalidAccessControlSpec, true) - assert.Equal(t, accessControlList.DefaultAction, config.DenyAccess) + assert.Equal(t, config.DenyAccess, accessControlList.DefaultAction) }) } @@ -384,7 +384,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { t.Run("test when no acl specified", func(t *testing.T) { srcAppID := app1 spiffeID, err := spiffe.FromStrings(td, "ns1", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op1", common.HTTPExtension_POST, true, nil) // Action = Allow the operation since no ACL is defined assert.True(t, isAllowed) @@ -394,7 +394,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { srcAppID := "appX" accessControlList, _ := initializeAccessControlList(true) spiffeID, err := spiffe.FromStrings(td, "ns1", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op1", common.HTTPExtension_POST, true, accessControlList) // Action = Default global action assert.False(t, isAllowed) @@ -404,7 +404,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { srcAppID := app1 accessControlList, _ := initializeAccessControlList(true) spiffeID, err := spiffe.FromStrings(privateTD, "ns1", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op1", common.HTTPExtension_POST, true, accessControlList) // Action = Ignore policy and apply global default action @@ -415,7 +415,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { srcAppID := app1 accessControlList, _ := initializeAccessControlList(true) spiffeID, err := spiffe.FromStrings(td, "abcd", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op1", common.HTTPExtension_POST, true, accessControlList) // Action = Ignore policy and apply global default action assert.False(t, isAllowed) @@ -439,7 +439,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { srcAppID := app1 accessControlList, _ := initializeAccessControlList(true) spiffeID, err := spiffe.FromStrings(td, "ns1", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "opX", common.HTTPExtension_POST, true, accessControlList) // Action = Ignore policy and apply default action for app assert.True(t, isAllowed) @@ -449,7 +449,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { 
srcAppID := app1 accessControlList, _ := initializeAccessControlList(true) spiffeID, err := spiffe.FromStrings(td, "ns1", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "Op2", common.HTTPExtension_POST, true, accessControlList) // Action = Ignore policy and apply default action for app assert.False(t, isAllowed) @@ -460,7 +460,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op4", common.HTTPExtension_PUT, true, accessControlList) // Action = Default action for the specific app @@ -472,7 +472,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns1", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op5", common.HTTPExtension_PUT, true, accessControlList) // Action = Global Default action assert.False(t, isAllowed) @@ -482,7 +482,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { srcAppID := app1 accessControlList, _ := initializeAccessControlList(true) spiffeID, err := spiffe.FromStrings(td, "ns1", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op2", common.HTTPExtension_PUT, true, accessControlList) // Action = Default action for the specific verb assert.False(t, isAllowed) @@ -493,7 +493,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op4", common.HTTPExtension_POST, true, accessControlList) // Action = Default action for the specific verb assert.True(t, isAllowed) @@ -504,7 +504,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "/op4", common.HTTPExtension_POST, true, accessControlList) // Action = Default action for the specific verb assert.True(t, isAllowed) @@ -515,7 +515,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op4", common.HTTPExtension_NONE, true, accessControlList) // Action = Default action for the app assert.False(t, isAllowed) @@ -526,7 +526,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := 
initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "/op3/a", common.HTTPExtension_PUT, true, accessControlList) // Action = Default action for the specific verb assert.True(t, isAllowed) @@ -537,7 +537,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(false) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "/OP4", common.HTTPExtension_NONE, false, accessControlList) // Action = Default action for the specific verb assert.False(t, isAllowed) @@ -548,7 +548,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "/op3/b/b", common.HTTPExtension_PUT, true, accessControlList) // Action = Default action for the app assert.False(t, isAllowed) @@ -559,7 +559,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(true) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "/op3/a/b", common.HTTPExtension_PUT, true, accessControlList) // Action = Default action for the app assert.True(t, isAllowed) @@ -570,7 +570,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(false) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op4", common.HTTPExtension_NONE, false, accessControlList) // Action = Default action for the app assert.True(t, isAllowed) @@ -581,7 +581,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(false) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op6", common.HTTPExtension_NONE, false, accessControlList) // Action = Default action for the app assert.True(t, isAllowed) @@ -592,7 +592,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(false) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op7/a/b/c", common.HTTPExtension_NONE, false, accessControlList) assert.True(t, isAllowed) @@ -608,7 +608,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t 
*testing.T) { accessControlList, _ := initializeAccessControlList(false) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op7/a/bc", common.HTTPExtension_NONE, false, accessControlList) assert.True(t, isAllowed) }) @@ -618,7 +618,7 @@ func Test_isOperationAllowedByAccessControlPolicy(t *testing.T) { accessControlList, _ := initializeAccessControlList(false) domain1TD := spiffeid.RequireTrustDomainFromString("domain1") spiffeID, err := spiffe.FromStrings(domain1TD, "ns2", srcAppID) - assert.NoError(t, err) + require.NoError(t, err) isAllowed, _ := isOperationAllowedByAccessControlPolicy(spiffeID, "op7/c/d/e", common.HTTPExtension_NONE, false, accessControlList) assert.True(t, isAllowed) }) diff --git a/pkg/actors/actor_lock_test.go b/pkg/actors/actor_lock_test.go index 01b40378c23..1f62cbfc566 100644 --- a/pkg/actors/actor_lock_test.go +++ b/pkg/actors/actor_lock_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var baseID = "test" @@ -30,7 +31,7 @@ func TestLockBaseCase(t *testing.T) { err := lock.Lock(requestID) - assert.NoError(t, err) + require.NoError(t, err) if requestID == nil { assert.Nil(t, lock.activeRequest) } else { @@ -52,7 +53,7 @@ func TestLockBypassWithMatchingID(t *testing.T) { for i := 1; i < 5; i++ { err := lock.Lock(requestID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, *requestID, *lock.activeRequest) assert.Equal(t, int32(i), lock.stackDepth.Load()) } @@ -85,13 +86,13 @@ func TestStackDepthLimit(t *testing.T) { err := lock.Lock(requestID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, *requestID, *lock.activeRequest) assert.Equal(t, int32(1), lock.stackDepth.Load()) err = lock.Lock(requestID) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, "maximum stack depth exceeded", err.Error()) } diff --git a/pkg/actors/actor_test.go b/pkg/actors/actor_test.go index 2d5798951e0..0f947bbbafa 100644 --- a/pkg/actors/actor_test.go +++ b/pkg/actors/actor_test.go @@ -29,7 +29,7 @@ func TestIsBusy(t *testing.T) { testActor := newActor("testType", "testID", &reentrancyStackDepth, time.Second, nil) testActor.lock(nil) - assert.Equal(t, true, testActor.isBusy()) + assert.True(t, testActor.isBusy()) testActor.unlock() } @@ -38,7 +38,7 @@ func TestTurnBasedConcurrencyLocks(t *testing.T) { // first lock testActor.lock(nil) - assert.Equal(t, true, testActor.isBusy()) + assert.True(t, testActor.isBusy()) firstIdleAt := *testActor.idleAt.Load() waitCh := make(chan bool) @@ -70,7 +70,7 @@ func TestTurnBasedConcurrencyLocks(t *testing.T) { assert.Equal(t, int32(0), testActor.pendingActorCalls.Load()) assert.False(t, testActor.isBusy()) - assert.True(t, testActor.idleAt.Load().Sub(firstIdleAt) >= 10*time.Millisecond) + assert.GreaterOrEqual(t, testActor.idleAt.Load().Sub(firstIdleAt), 10*time.Millisecond) } func TestDisposedActor(t *testing.T) { diff --git a/pkg/actors/actors.go b/pkg/actors/actors.go index 54586cf3756..338bace452f 100644 --- a/pkg/actors/actors.go +++ b/pkg/actors/actors.go @@ -382,8 +382,8 @@ func (a *actorsRuntime) deactivateActor(act *actor) error { } defer resp.Close() - if resp.Status().Code != http.StatusOK { - diag.DefaultMonitoring.ActorDeactivationFailed(act.actorType, "status_code_"+strconv.FormatInt(int64(resp.Status().Code), 10)) + if 
resp.Status().GetCode() != http.StatusOK { + diag.DefaultMonitoring.ActorDeactivationFailed(act.actorType, "status_code_"+strconv.FormatInt(int64(resp.Status().GetCode()), 10)) body, _ := resp.RawDataFull() return fmt.Errorf("error from actor service: %s", string(body)) } @@ -486,7 +486,7 @@ func (a *actorsRuntime) callRemoteActorWithRetry( fn func(ctx context.Context, targetAddress, targetID string, req *invokev1.InvokeMethodRequest) (*invokev1.InvokeMethodResponse, func(destroy bool), error), targetAddress, targetID string, req *invokev1.InvokeMethodRequest, ) (*invokev1.InvokeMethodResponse, error) { - if !a.resiliency.PolicyDefined(req.Actor().ActorType, resiliency.ActorPolicy{}) { + if !a.resiliency.PolicyDefined(req.Actor().GetActorType(), resiliency.ActorPolicy{}) { // This policy has built-in retries so enable replay in the request req.WithReplay(true) policyRunner := resiliency.NewRunnerWithOptions(ctx, @@ -529,9 +529,9 @@ func (a *actorsRuntime) getOrCreateActor(act *internalv1pb.Actor) *actor { val, ok := a.actorsTable.Load(key) if !ok { actorInstance := newActor( - act.ActorType, act.ActorId, - a.actorsConfig.GetReentrancyForType(act.ActorType).MaxStackDepth, - a.actorsConfig.GetIdleTimeoutForType(act.ActorType), + act.GetActorType(), act.GetActorId(), + a.actorsConfig.GetReentrancyForType(act.GetActorType()).MaxStackDepth, + a.actorsConfig.GetIdleTimeoutForType(act.GetActorType()), a.clock, ) val, _ = a.actorsTable.LoadOrStore(key, actorInstance) @@ -571,8 +571,8 @@ func (a *actorsRuntime) callLocalActor(ctx context.Context, req *invokev1.Invoke // Replace method to actors method. msg := req.Message() - originalMethod := msg.Method - msg.Method = "actors/" + actorTypeID.ActorType + "/" + actorTypeID.ActorId + "/method/" + msg.Method + originalMethod := msg.GetMethod() + msg.Method = "actors/" + actorTypeID.GetActorType() + "/" + actorTypeID.GetActorId() + "/method/" + msg.GetMethod() // Reset the method so we can perform retries. 
defer func() { @@ -614,7 +614,7 @@ func (a *actorsRuntime) callLocalActor(ctx context.Context, req *invokev1.Invoke return nil, errors.New("error from actor service: response object is nil") } - if resp.Status().Code != http.StatusOK { + if resp.Status().GetCode() != http.StatusOK { respData, _ := resp.RawDataFull() return nil, fmt.Errorf("error from actor service: %s", string(respData)) } diff --git a/pkg/actors/actors_test.go b/pkg/actors/actors_test.go index 1809cbbcb7d..540423b2d10 100644 --- a/pkg/actors/actors_test.go +++ b/pkg/actors/actors_test.go @@ -128,14 +128,14 @@ type reentrantAppChannel struct { } func (r *reentrantAppChannel) InvokeMethod(ctx context.Context, req *invokev1.InvokeMethodRequest, appID string) (*invokev1.InvokeMethodResponse, error) { - r.callLog = append(r.callLog, "Entering "+req.Message().Method) + r.callLog = append(r.callLog, "Entering "+req.Message().GetMethod()) if len(r.nextCall) > 0 { nextReq := r.nextCall[0] r.nextCall = r.nextCall[1:] if val, ok := req.Metadata()["Dapr-Reentrancy-Id"]; ok { nextReq.AddMetadata(map[string][]string{ - "Dapr-Reentrancy-Id": val.Values, + "Dapr-Reentrancy-Id": val.GetValues(), }) } resp, err := r.a.callLocalActor(context.Background(), nextReq) @@ -144,7 +144,7 @@ func (r *reentrantAppChannel) InvokeMethod(ctx context.Context, req *invokev1.In } defer resp.Close() } - r.callLog = append(r.callLog, "Exiting "+req.Message().Method) + r.callLog = append(r.callLog, "Exiting "+req.Message().GetMethod()) return invokev1.NewInvokeMethodResponse(200, "OK", nil), nil } @@ -460,7 +460,7 @@ func TestTimerExecution(t *testing.T) { Callback: "callback", Data: json.RawMessage(`"data"`), }, true) - assert.NoError(t, err) + require.NoError(t, err) } func TestReminderExecution(t *testing.T) { @@ -479,7 +479,7 @@ func TestReminderExecution(t *testing.T) { Name: "reminder1", Data: json.RawMessage(`"data"`), }, false) - assert.NoError(t, err) + require.NoError(t, err) } func TestConstructActorStateKey(t *testing.T) { @@ -498,7 +498,7 @@ func TestConstructActorStateKey(t *testing.T) { // Check split keys := strings.Split(stateKey, delim) - assert.Equal(t, 4, len(keys)) + assert.Len(t, keys, 4) assert.Equal(t, TestAppID, keys[0]) assert.Equal(t, actorType, keys[1]) assert.Equal(t, actorID, keys[2]) @@ -657,7 +657,7 @@ func TestDeleteState(t *testing.T) { }) // assert - assert.NoError(t, err) + require.NoError(t, err) assert.Nilf(t, response.Data, "expected nil, but got %s", string(response.Data)) } @@ -772,12 +772,12 @@ func TestTransactionalOperation(t *testing.T) { _, err := op.StateOperation("base||", StateOperationOpts{ StateTTLEnabled: false, }) - assert.ErrorContains(t, err, `ttlInSeconds is not supported without the "ActorStateTTL" feature enabled`) + require.ErrorContains(t, err, `ttlInSeconds is not supported without the "ActorStateTTL" feature enabled`) resI, err := op.StateOperation("base||", StateOperationOpts{ StateTTLEnabled: true, }) - assert.NoError(t, err) + require.NoError(t, err) res, ok := resI.(state.SetRequest) require.True(t, ok) @@ -800,7 +800,7 @@ func TestCallLocalActor(t *testing.T) { defer testActorsRuntime.Close() resp, err := testActorsRuntime.callLocalActor(context.Background(), req) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, resp) defer resp.Close() }) @@ -858,7 +858,7 @@ func TestTransactionalState(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("Multiple requests succeeds", func(t *testing.T) { @@ -888,7 +888,7 @@ func TestTransactionalState(t 
*testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("Too many requests fail", func(t *testing.T) { @@ -920,8 +920,8 @@ func TestTransactionalState(t *testing.T) { ActorID: actorID, Operations: ops, }) - assert.Error(t, err) - assert.ErrorIs(t, err, ErrTransactionsTooManyOperations) + require.Error(t, err) + require.ErrorIs(t, err, ErrTransactionsTooManyOperations) }) t.Run("Wrong request body - should fail", func(t *testing.T) { @@ -942,7 +942,7 @@ func TestTransactionalState(t *testing.T) { }, }, }) - assert.NotNil(t, err) + require.Error(t, err) }) t.Run("Unsupported operation type - should fail", func(t *testing.T) { @@ -961,7 +961,7 @@ func TestTransactionalState(t *testing.T) { }, }, }) - assert.EqualError(t, err, "operation type Wrong not supported") + require.EqualError(t, err, "operation type Wrong not supported") }) } @@ -1123,27 +1123,27 @@ func TestConstructCompositeKeyWithThreeArgs(t *testing.T) { func TestHostValidation(t *testing.T) { t.Run("kubernetes mode with mTLS, missing namespace", func(t *testing.T) { err := ValidateHostEnvironment(true, modes.KubernetesMode, "") - assert.Error(t, err) + require.Error(t, err) }) t.Run("kubernetes mode without mTLS, missing namespace", func(t *testing.T) { err := ValidateHostEnvironment(false, modes.KubernetesMode, "") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("kubernetes mode with mTLS and namespace", func(t *testing.T) { err := ValidateHostEnvironment(true, modes.KubernetesMode, "default") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("self hosted mode with mTLS, missing namespace", func(t *testing.T) { err := ValidateHostEnvironment(true, modes.StandaloneMode, "") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("self hosted mode without mTLS, missing namespace", func(t *testing.T) { err := ValidateHostEnvironment(false, modes.StandaloneMode, "") - assert.NoError(t, err) + require.NoError(t, err) }) } @@ -1171,7 +1171,7 @@ func TestBasicReentrantActorLocking(t *testing.T) { reentrantAppChannel.a = testActorsRuntime resp, err := testActorsRuntime.callLocalActor(context.Background(), req) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, resp) defer resp.Close() assert.Equal(t, []string{ @@ -1206,7 +1206,7 @@ func TestReentrantActorLockingOverMultipleActors(t *testing.T) { reentrantAppChannel.a = testActorsRuntime resp, err := testActorsRuntime.callLocalActor(context.Background(), req) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, resp) defer resp.Close() assert.Equal(t, []string{ @@ -1240,7 +1240,7 @@ func TestReentrancyStackLimit(t *testing.T) { resp, err := testActorsRuntime.callLocalActor(context.Background(), req) assert.Nil(t, resp) - assert.Error(t, err) + require.Error(t, err) } func TestReentrancyPerActor(t *testing.T) { @@ -1275,7 +1275,7 @@ func TestReentrancyPerActor(t *testing.T) { reentrantAppChannel.a = testActorsRuntime resp, err := testActorsRuntime.callLocalActor(context.Background(), req) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, resp) defer resp.Close() assert.Equal(t, []string{ @@ -1317,7 +1317,7 @@ func TestReentrancyStackLimitPerActor(t *testing.T) { resp, err := testActorsRuntime.callLocalActor(context.Background(), req) assert.Nil(t, resp) - assert.Error(t, err) + require.Error(t, err) } func TestActorsRuntimeResiliency(t *testing.T) { @@ -1348,7 +1348,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { map[string]int{}, ), KeyFunc: func(req *invokev1.InvokeMethodRequest) 
string { - return req.Actor().ActorId + return req.Actor().GetActorId() }, } builder := runtimeBuilder{ @@ -1370,7 +1370,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { resp, err := runtime.callLocalActor(context.Background(), req) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, resp) assert.Equal(t, 1, failingAppChannel.Failure.CallCount("timeoutId")) assert.Less(t, end.Sub(start), time.Second*10) @@ -1385,7 +1385,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { _, err := runtime.GetState(context.Background(), req) callKey := constructCompositeKey(TestAppID, actorType, actorID, "failingGetStateKey") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingState.Failure.CallCount(callKey)) }) @@ -1400,7 +1400,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { end := time.Now() callKey := constructCompositeKey(TestAppID, actorType, actorID, "timeoutGetStateKey") - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingState.Failure.CallCount(callKey)) assert.Less(t, end.Sub(start), time.Second*10) }) @@ -1422,7 +1422,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { err := runtime.TransactionalStateOperation(context.Background(), req) callKey := constructCompositeKey(TestAppID, actorType, actorID, "failingMultiKey") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingState.Failure.CallCount(callKey)) }) @@ -1445,7 +1445,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { end := time.Now() callKey := constructCompositeKey(TestAppID, actorType, actorID, "timeoutMultiKey") - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingState.Failure.CallCount(callKey)) assert.Less(t, end.Sub(start), time.Second*10) }) @@ -1457,7 +1457,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { }) callKey := constructCompositeKey("actors", actorType) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingState.Failure.CallCount(callKey)) // Key will no longer fail, so now we can check the timeout. @@ -1468,7 +1468,7 @@ func TestActorsRuntimeResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 4, failingState.Failure.CallCount(callKey)) // Should be called 2 more times. 
assert.Less(t, end.Sub(start), time.Second*10) }) diff --git a/pkg/actors/config_test.go b/pkg/actors/config_test.go index 5da20f2c84b..b6b2fe321de 100644 --- a/pkg/actors/config_test.go +++ b/pkg/actors/config_test.go @@ -58,7 +58,7 @@ func TestConfig(t *testing.T) { assert.Equal(t, "1s", c.ActorDeactivationScanInterval.String()) assert.Equal(t, "2s", c.ActorIdleTimeout.String()) assert.Equal(t, "3s", c.DrainOngoingCallTimeout.String()) - assert.Equal(t, true, c.DrainRebalancedActors) + assert.True(t, c.DrainRebalancedActors) assert.Equal(t, "default", c.Namespace) assert.Equal(t, TestPodName, c.PodName) } diff --git a/pkg/actors/errors/actor_error.go b/pkg/actors/errors/actor_error.go index 93e45ce9a3b..ab7c2804929 100644 --- a/pkg/actors/errors/actor_error.go +++ b/pkg/actors/errors/actor_error.go @@ -41,7 +41,7 @@ func NewActorError(invokeResponse *invokev1.InvokeMethodResponse) error { return fmt.Errorf("could not read actor error: %s", err) } - statusCode := int(invokeResponse.Status().Code) + statusCode := int(invokeResponse.Status().GetCode()) if !invokeResponse.IsHTTPResponse() { statusCode = invokev1.HTTPStatusFromCode(codes.Code(statusCode)) } diff --git a/pkg/actors/internal/reminder_test.go b/pkg/actors/internal/reminder_test.go index 212bc60f1f9..3511edd0ed7 100644 --- a/pkg/actors/internal/reminder_test.go +++ b/pkg/actors/internal/reminder_test.go @@ -35,11 +35,11 @@ func TestReminderProperties(t *testing.T) { } t.Run("ActorKey", func(t *testing.T) { - require.Equal(t, r.ActorKey(), "type||id") + require.Equal(t, "type||id", r.ActorKey()) }) t.Run("Key", func(t *testing.T) { - require.Equal(t, r.Key(), "type||id||name") + require.Equal(t, "type||id||name", r.Key()) }) t.Run("NextTick", func(t *testing.T) { diff --git a/pkg/actors/internal_actor_test.go b/pkg/actors/internal_actor_test.go index 59926f307b6..8b19ea79a7a 100644 --- a/pkg/actors/internal_actor_test.go +++ b/pkg/actors/internal_actor_test.go @@ -159,23 +159,23 @@ func TestInternalActorCall(t *testing.T) { require.NoError(t, err) defer resp.Close() - if assert.NoError(t, err) && assert.NotNil(t, resp) { - // Verify the response metadata matches what we expect - assert.Equal(t, int32(200), resp.Status().Code) - contentType := resp.ContentType() - assert.Equal(t, invokev1.OctetStreamContentType, contentType) - - // Verify the actor got all the expected inputs (which are echoed back to us) - info, err := decodeTestResponse(resp.RawData()) - require.NoError(t, err) - require.NotNil(t, info) - assert.Equal(t, testActorID, info.ActorID) - assert.Equal(t, testMethod, info.MethodName) - assert.Equal(t, []byte(testInput), info.Input) - - // Verify the preconfigured output was successfully returned back to us - assert.Equal(t, testOutput, info.Output) - } + require.NoError(t, err) + assert.NotNil(t, resp) + // Verify the response metadata matches what we expect + assert.Equal(t, int32(200), resp.Status().GetCode()) + contentType := resp.ContentType() + assert.Equal(t, invokev1.OctetStreamContentType, contentType) + + // Verify the actor got all the expected inputs (which are echoed back to us) + info, err := decodeTestResponse(resp.RawData()) + require.NoError(t, err) + require.NotNil(t, info) + assert.Equal(t, testActorID, info.ActorID) + assert.Equal(t, testMethod, info.MethodName) + assert.Equal(t, []byte(testInput), info.Input) + + // Verify the preconfigured output was successfully returned back to us + assert.Equal(t, testOutput, info.Output) } func TestInternalActorReminder(t *testing.T) { @@ -238,7 +238,7 @@ func 
TestInternalActorDeactivation(t *testing.T) { require.NoError(t, err) defer resp.Close() - assert.NoError(t, err) + require.NoError(t, err) // Deactivate the actor, ensuring no errors and that the correct actor ID was provided. actAny, ok := testActorRuntime.actorsTable.Load(constructCompositeKey(testActorType, testActorID)) diff --git a/pkg/actors/placement/client_test.go b/pkg/actors/placement/client_test.go index 8ad1f376bc8..aa82fc00032 100644 --- a/pkg/actors/placement/client_test.go +++ b/pkg/actors/placement/client_test.go @@ -39,7 +39,7 @@ func TestConnectToServer(t *testing.T) { return []grpc.DialOption{}, nil }) - assert.NotNil(t, client.connectToServer(context.Background(), "")) + require.Error(t, client.connectToServer(context.Background(), "")) }) t.Run("when new placement stream returns an error connectToServer should return an error", func(t *testing.T) { client := newPlacementClient(func() ([]grpc.DialOption, error) { @@ -47,7 +47,7 @@ func TestConnectToServer(t *testing.T) { }) conn, cleanup := newTestServerWithOpts() // do not register the placement stream server defer cleanup() - assert.NotNil(t, client.connectToServer(context.Background(), conn)) + require.Error(t, client.connectToServer(context.Background(), conn)) }) t.Run("when connectToServer succeeds it should broadcast that a new connection is alive", func(t *testing.T) { conn, _, cleanup := newTestServer() // do not register the placement stream server @@ -64,7 +64,7 @@ func TestConnectToServer(t *testing.T) { ready.Done() }() - assert.Nil(t, client.connectToServer(context.Background(), conn)) + require.NoError(t, client.connectToServer(context.Background(), conn)) ready.Wait() // should not timeout assert.True(t, client.streamConnAlive) }) @@ -100,7 +100,7 @@ func TestDisconnect(t *testing.T) { defer cleanup() client := newPlacementClient(getGrpcOptsGetter([]string{conn}, testSecurity(t))) - assert.Nil(t, client.connectToServer(context.Background(), conn)) + require.NoError(t, client.connectToServer(context.Background(), conn)) called := false shouldBeCalled := func() { @@ -118,7 +118,7 @@ func TestDisconnect(t *testing.T) { }() client.disconnectFn(shouldBeCalled) ready.Wait() - assert.Equal(t, client.clientConn.GetState(), connectivity.Shutdown) + assert.Equal(t, connectivity.Shutdown, client.clientConn.GetState()) assert.True(t, called) }) } diff --git a/pkg/actors/placement/placement.go b/pkg/actors/placement/placement.go index 8b0d79102d1..034b7f0a8d8 100644 --- a/pkg/actors/placement/placement.go +++ b/pkg/actors/placement/placement.go @@ -473,14 +473,14 @@ func (p *actorPlacement) onPlacementError(err error) { } func (p *actorPlacement) onPlacementOrder(in *v1pb.PlacementOrder) { - log.Debugf("Placement order received: %s", in.Operation) - diag.DefaultMonitoring.ActorPlacementTableOperationReceived(in.Operation) + log.Debugf("Placement order received: %s", in.GetOperation()) + diag.DefaultMonitoring.ActorPlacementTableOperationReceived(in.GetOperation()) // lock all incoming calls when an updated table arrives p.operationUpdateLock.Lock() defer p.operationUpdateLock.Unlock() - switch in.Operation { + switch in.GetOperation() { case lockOperation: p.blockPlacements() @@ -498,7 +498,7 @@ func (p *actorPlacement) onPlacementOrder(in *v1pb.PlacementOrder) { p.unblockPlacements() case updateOperation: - p.updatePlacements(in.Tables) + p.updatePlacements(in.GetTables()) } } @@ -538,23 +538,23 @@ func (p *actorPlacement) updatePlacements(in *v1pb.PlacementTables) { p.placementTableLock.Lock() defer 
p.placementTableLock.Unlock() - if in.Version == p.placementTables.Version { + if in.GetVersion() == p.placementTables.Version { return } - if in.ApiLevel != p.apiLevel { - p.apiLevel = in.ApiLevel - updatedAPILevel = ptr.Of(in.ApiLevel) + if in.GetApiLevel() != p.apiLevel { + p.apiLevel = in.GetApiLevel() + updatedAPILevel = ptr.Of(in.GetApiLevel()) } maps.Clear(p.placementTables.Entries) - p.placementTables.Version = in.Version - for k, v := range in.Entries { - loadMap := make(map[string]*hashing.Host, len(v.LoadMap)) - for lk, lv := range v.LoadMap { - loadMap[lk] = hashing.NewHost(lv.Name, lv.Id, lv.Load, lv.Port) + p.placementTables.Version = in.GetVersion() + for k, v := range in.GetEntries() { + loadMap := make(map[string]*hashing.Host, len(v.GetLoadMap())) + for lk, lv := range v.GetLoadMap() { + loadMap[lk] = hashing.NewHost(lv.GetName(), lv.GetId(), lv.GetLoad(), lv.GetPort()) } - p.placementTables.Entries[k] = hashing.NewFromExisting(v.Hosts, v.SortedSet, loadMap) + p.placementTables.Entries[k] = hashing.NewFromExisting(v.GetHosts(), v.GetSortedSet(), loadMap) } updated = true diff --git a/pkg/actors/placement/placement_test.go b/pkg/actors/placement/placement_test.go index 4b1a0421088..d331c21682f 100644 --- a/pkg/actors/placement/placement_test.go +++ b/pkg/actors/placement/placement_test.go @@ -88,7 +88,7 @@ func TestPlacementStream_RoundRobin(t *testing.T) { require.NoError(t, testPlacement.Start(context.Background())) time.Sleep(statusReportHeartbeatInterval * 3) assert.Equal(t, leaderServer[0], testPlacement.serverIndex.Load()) - assert.True(t, testSrv[testPlacement.serverIndex.Load()].recvCount.Load() >= 2) + assert.GreaterOrEqual(t, testSrv[testPlacement.serverIndex.Load()].recvCount.Load(), int32(2)) }) t.Run("shutdown leader and find the next leader", func(t *testing.T) { @@ -103,7 +103,7 @@ func TestPlacementStream_RoundRobin(t *testing.T) { // wait until placement connect to the second leader node time.Sleep(statusReportHeartbeatInterval * 3) assert.Equal(t, leaderServer[1], testPlacement.serverIndex.Load()) - assert.True(t, testSrv[testPlacement.serverIndex.Load()].recvCount.Load() >= 1) + assert.GreaterOrEqual(t, testSrv[testPlacement.serverIndex.Load()].recvCount.Load(), int32(1)) }) // tear down @@ -143,12 +143,12 @@ func TestAppHealthyStatus(t *testing.T) { // wait until client sends heartbeat to the test server time.Sleep(statusReportHeartbeatInterval * 3) oldCount := testSrv.recvCount.Load() - assert.True(t, oldCount >= 2, "client must send at least twice") + assert.GreaterOrEqual(t, oldCount, int32(2), "client must send at least twice") // Mark app unhealthy appHealthCh <- false time.Sleep(statusReportHeartbeatInterval * 2) - assert.True(t, testSrv.recvCount.Load() <= oldCount+1, "no more +1 heartbeat because app is unhealthy") + assert.LessOrEqual(t, testSrv.recvCount.Load(), oldCount+1, "no more +1 heartbeat because app is unhealthy") // clean up close(appHealthCh) @@ -249,7 +249,7 @@ func TestWaitUntilPlacementTableIsReady(t *testing.T) { } err := testPlacement.WaitUntilReady(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("wait until ready", func(t *testing.T) { @@ -378,7 +378,7 @@ func TestLookupActor(t *testing.T) { ActorID: "test", }) require.Error(t, err) - assert.ErrorContains(t, err, "did not find address for actor") + require.ErrorContains(t, err, "did not find address for actor") }) t.Run("found host and appid", func(t *testing.T) { @@ -408,7 +408,7 @@ func TestLookupActor(t *testing.T) { ActorID: "id0", }) 
require.Error(t, err) - assert.ErrorContains(t, err, "did not find address for actor") + require.ErrorContains(t, err, "did not find address for actor") assert.Empty(t, lar.Address) assert.Empty(t, lar.AppID) }) diff --git a/pkg/actors/reminders/reminders_test.go b/pkg/actors/reminders/reminders_test.go index 4e8e8d1b716..21065853318 100644 --- a/pkg/actors/reminders/reminders_test.go +++ b/pkg/actors/reminders/reminders_test.go @@ -84,33 +84,33 @@ func TestStoreIsNotInitialized(t *testing.T) { t.Run("getReminderTrack", func(t *testing.T) { r, err := testReminders.getReminderTrack(context.Background(), "foo||bar") - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, r) }) t.Run("updateReminderTrack", func(t *testing.T) { err := testReminders.updateReminderTrack(context.Background(), "foo||bar", 1, testReminders.clock.Now(), nil) - assert.Error(t, err) + require.Error(t, err) }) t.Run("CreateReminder", func(t *testing.T) { req := internal.CreateReminderRequest{} reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(context.Background(), reminder) - assert.Error(t, err) + require.Error(t, err) }) t.Run("getRemindersForActorType", func(t *testing.T) { r1, r2, err := testReminders.getRemindersForActorType(context.Background(), "foo", false) assert.Nil(t, r1) assert.Nil(t, r2) - assert.Error(t, err) + require.Error(t, err) }) t.Run("DeleteReminder", func(t *testing.T) { err := testReminders.DeleteReminder(context.Background(), internal.DeleteReminderRequest{}) - assert.Error(t, err) + require.Error(t, err) }) } @@ -274,7 +274,7 @@ func TestSetReminderTrack(t *testing.T) { actorType, actorID := getTestActorTypeAndID() noRepetition := -1 err := testReminders.updateReminderTrack(context.Background(), constructCompositeKey(actorType, actorID), noRepetition, testReminders.clock.Now(), nil) - assert.NoError(t, err) + require.NoError(t, err) } func TestGetReminderTrack(t *testing.T) { @@ -344,7 +344,7 @@ func TestCreateReminder(t *testing.T) { Data: nil, } reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) require.NoError(t, err) }() @@ -360,7 +360,7 @@ func TestCreateReminder(t *testing.T) { Data: nil, } reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) require.NoError(t, err) }() @@ -386,7 +386,7 @@ func TestCreateReminder(t *testing.T) { Data: nil, } reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testRemindersWithPartition.CreateReminder(ctx, reminder) require.NoError(t, err) } @@ -395,19 +395,19 @@ func TestCreateReminder(t *testing.T) { // Does not migrate yet _, actorTypeMetadata, err := testRemindersWithPartition.getRemindersForActorType(context.Background(), actorType, false) require.NoError(t, err) - assert.True(t, len(actorTypeMetadata.ID) > 0) + assert.NotEmpty(t, actorTypeMetadata.ID) assert.Equal(t, 0, actorTypeMetadata.RemindersMetadata.PartitionCount) // Check for 2nd type. _, actorTypeMetadata, err = testRemindersWithPartition.getRemindersForActorType(context.Background(), secondActorType, false) require.NoError(t, err) - assert.True(t, len(actorTypeMetadata.ID) > 0) + assert.NotEmpty(t, actorTypeMetadata.ID) assert.Equal(t, 0, actorTypeMetadata.RemindersMetadata.PartitionCount) // Migrates here. 
reminderReferences, actorTypeMetadata, err := testRemindersWithPartition.getRemindersForActorType(context.Background(), actorType, true) require.NoError(t, err) - assert.True(t, len(actorTypeMetadata.ID) > 0) + assert.NotEmpty(t, actorTypeMetadata.ID) assert.Equal(t, TestActorMetadataPartitionCount, actorTypeMetadata.RemindersMetadata.PartitionCount) partitions := map[uint32]bool{} @@ -418,14 +418,14 @@ func TestCreateReminder(t *testing.T) { reminders[reminderRef.Reminder.Name] = true assert.Equal(t, actorTypeMetadata.ID, reminderRef.ActorMetadataID) } - assert.Equal(t, TestActorMetadataPartitionCount, len(partitions)) - assert.Equal(t, numReminders, len(reminderReferences)) - assert.Equal(t, numReminders, len(reminders)) + assert.Len(t, partitions, TestActorMetadataPartitionCount) + assert.Len(t, reminderReferences, numReminders) + assert.Len(t, reminders, numReminders) // Check for 2nd type. secondReminderReferences, secondTypeMetadata, err := testRemindersWithPartition.getRemindersForActorType(context.Background(), secondActorType, true) require.NoError(t, err) - assert.True(t, len(secondTypeMetadata.ID) > 0) + assert.NotEmpty(t, secondTypeMetadata.ID) assert.Equal(t, 20, secondTypeMetadata.RemindersMetadata.PartitionCount) partitions = map[uint32]bool{} @@ -436,9 +436,9 @@ func TestCreateReminder(t *testing.T) { reminders[reminderRef.Reminder.Name] = true assert.Equal(t, secondTypeMetadata.ID, reminderRef.ActorMetadataID) } - assert.Equal(t, 20, len(partitions)) - assert.Equal(t, numReminders, len(secondReminderReferences)) - assert.Equal(t, numReminders, len(reminders)) + assert.Len(t, partitions, 20) + assert.Len(t, secondReminderReferences, numReminders) + assert.Len(t, reminders, numReminders) } func newTestRemindersWithMockAndActorMetadataPartition() *reminders { @@ -544,13 +544,13 @@ func TestOverrideReminder(t *testing.T) { actorType, actorID := getTestActorTypeAndID() req := createReminderData(actorID, actorType, "reminder1", "1s", "1s", "", "a") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) require.NoError(t, err) req2 := createReminderData(actorID, actorType, "reminder1", "1s", "1s", "", "b") reminder2, err := req2.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder2) require.NoError(t, err) reminders, _, err := testReminders.getRemindersForActorType(ctx, actorType, false) @@ -567,13 +567,13 @@ func TestOverrideReminder(t *testing.T) { actorType, actorID := getTestActorTypeAndID() req := createReminderData(actorID, actorType, "reminder1", "1s", "1s", "", "") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) req2 := createReminderData(actorID, actorType, "reminder1", "1s", "2s", "", "") reminder2, err := req2.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) testReminders.CreateReminder(ctx, reminder2) reminders, _, err := testReminders.getRemindersForActorType(context.Background(), actorType, false) require.NoError(t, err) @@ -588,15 +588,15 @@ func TestOverrideReminder(t *testing.T) { actorType, actorID := getTestActorTypeAndID() req := createReminderData(actorID, actorType, "reminder1", "1s", "1s", "", "") reminder, err := req.NewReminder(testReminders.clock.Now()) - 
assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) req2 := createReminderData(actorID, actorType, "reminder1", "2s", "1s", "", "") reminder2, err := req2.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder2) - assert.NoError(t, err) + require.NoError(t, err) reminders, _, err := testReminders.getRemindersForActorType(context.Background(), actorType, false) require.NoError(t, err) assert.Equal(t, "2s", reminders[0].Reminder.Period.String()) @@ -610,18 +610,18 @@ func TestOverrideReminder(t *testing.T) { actorType, actorID := getTestActorTypeAndID() req := createReminderData(actorID, actorType, "reminder1", "2s", "1s", "PT5M", "") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) ttl := "9999-09-01T00:00:00Z" origTime, err := time.Parse(time.RFC3339, ttl) - assert.NoError(t, err) + require.NoError(t, err) req2 := createReminderData(actorID, actorType, "reminder1", "2s", "1s", ttl, "") reminder2, err := req2.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder2) - assert.NoError(t, err) + require.NoError(t, err) reminders, _, err := testReminders.getRemindersForActorType(context.Background(), actorType, false) require.NoError(t, err) require.NotEmpty(t, reminders) @@ -647,13 +647,13 @@ func TestOverrideReminderCancelsActiveReminders(t *testing.T) { req := createReminderData(actorID, actorType, reminderName, "10s", "1s", "", "a") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) req2 := createReminderData(actorID, actorType, reminderName, "9s", "1s", "", "b") reminder2, err := req2.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) testReminders.CreateReminder(ctx, reminder2) reminders, _, err := testReminders.getRemindersForActorType(context.Background(), actorType, false) require.NoError(t, err) @@ -664,10 +664,10 @@ func TestOverrideReminderCancelsActiveReminders(t *testing.T) { req3 := createReminderData(actorID, actorType, reminderName, "8s", "2s", "", "c") reminder3, err := req3.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) testReminders.CreateReminder(ctx, reminder3) reminders, _, err = testReminders.getRemindersForActorType(context.Background(), actorType, false) - assert.NoError(t, err) + require.NoError(t, err) // Check reminder is updated assert.Equal(t, "8s", reminders[0].Reminder.Period.String()) assert.Equal(t, testReminders.clock.Now().Add(2*time.Second), reminders[0].Reminder.RegisteredTime) @@ -719,27 +719,27 @@ func TestOverrideReminderCancelsMultipleActiveReminders(t *testing.T) { req := createReminderData(actorID, actorType, reminderName, "10s", "3s", "", "a") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) req2 := createReminderData(actorID, actorType, reminderName, "8s", "4s", "", "b") reminder2, err := 
req2.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder2) - assert.NoError(t, err) + require.NoError(t, err) req3 := createReminderData(actorID, actorType, reminderName, "8s", "4s", "", "c") reminder3, err := req3.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder3) - assert.NoError(t, err) + require.NoError(t, err) // due time for reminders is 4s, advance less advanceTickers(t, clock, time.Second*2) // Check reminder is updated reminders, _, err := testReminders.getRemindersForActorType(context.Background(), actorType, false) - assert.NoError(t, err) + require.NoError(t, err) // The statestore could have either reminder2 or reminder3 based on the timing. // Therefore, not verifying data field assert.Equal(t, "8s", reminders[0].Reminder.Period.String()) @@ -747,10 +747,10 @@ func TestOverrideReminderCancelsMultipleActiveReminders(t *testing.T) { req4 := createReminderData(actorID, actorType, reminderName, "7s", "2s", "", "d") reminder4, err := req4.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) testReminders.CreateReminder(ctx, reminder4) reminders, _, err = testReminders.getRemindersForActorType(context.Background(), actorType, false) - assert.NoError(t, err) + require.NoError(t, err) // due time for reminder is 2s advanceTickers(t, clock, time.Second) @@ -788,10 +788,10 @@ func TestDeleteReminderWithPartitions(t *testing.T) { // Create a reminder req := createReminderData(actorID, actorType, "reminder1", "1s", "1s", "", "") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) require.NoError(t, err) - assert.Equal(t, 1, len(testReminders.reminders[actorType])) + assert.Len(t, testReminders.reminders[actorType], 1) // Delete the reminder startCount := stateStore.CallCount("Multi") @@ -801,7 +801,7 @@ func TestDeleteReminderWithPartitions(t *testing.T) { ActorType: actorType, }) require.NoError(t, err) - assert.Equal(t, 0, len(testReminders.reminders[actorType])) + assert.Empty(t, testReminders.reminders[actorType]) // There should have been 1 Multi operation in the state store require.Equal(t, startCount+1, stateStore.CallCount("Multi")) @@ -815,7 +815,7 @@ func TestDeleteReminderWithPartitions(t *testing.T) { ActorType: actorType, }) require.NoError(t, err) - assert.Equal(t, 0, len(testReminders.reminders[actorType])) + assert.Empty(t, testReminders.reminders[actorType]) // There should have been no Multi operation in the state store require.Equal(t, startCount, stateStore.CallCount("Multi")) @@ -844,19 +844,19 @@ func TestDeleteReminder(t *testing.T) { go func() { req := createReminderData(actorID, actorType, "reminder1", "1s", "1s", "", "") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) errs <- testReminders.CreateReminder(ctx, reminder) }() go func() { req := createReminderData(actorID, actorType, "reminder2", "1s", "1s", "", "") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) errs <- testReminders.CreateReminder(ctx, reminder) }() for i := 0; i < 2; i++ { require.NoError(t, <-errs) } - assert.Equal(t, 2, len(testReminders.reminders[actorType])) + assert.Len(t, testReminders.reminders[actorType], 2) // Delete the reminders (in parallel) 
startCount := store.CallCount("Multi") @@ -877,7 +877,7 @@ func TestDeleteReminder(t *testing.T) { for i := 0; i < 2; i++ { require.NoError(t, <-errs) } - assert.Equal(t, 0, len(testReminders.reminders[actorType])) + assert.Empty(t, testReminders.reminders[actorType]) // There should have been 2 Multi operations in the state store require.Equal(t, startCount+2, store.CallCount("Multi")) @@ -891,7 +891,7 @@ func TestDeleteReminder(t *testing.T) { ActorType: actorType, }) require.NoError(t, err) - assert.Equal(t, 0, len(testReminders.reminders[actorType])) + assert.Empty(t, testReminders.reminders[actorType]) // There should have been no Multi operation in the state store require.Equal(t, startCount, store.CallCount("Multi")) @@ -1044,7 +1044,7 @@ func TestReminderRepeats(t *testing.T) { } reminder, err := req.NewReminder(testReminders.clock.Now()) if test.expRepeats == 0 { - assert.ErrorContains(t, err, "has zero repetitions") + require.ErrorContains(t, err, "has zero repetitions") return } require.NoError(t, err) @@ -1052,7 +1052,7 @@ func TestReminderRepeats(t *testing.T) { require.NoError(t, err) testReminders.remindersLock.RLock() - assert.Equal(t, 1, len(testReminders.reminders[actorType])) + assert.Len(t, testReminders.reminders[actorType], 1) testReminders.remindersLock.RUnlock() count := 0 @@ -1235,9 +1235,9 @@ func reminderValidation(dueTime, period, ttl, msg string) func(t *testing.T) { err = testReminders.CreateReminder(context.Background(), reminder) } if len(msg) != 0 { - assert.ErrorContains(t, err, msg) + require.ErrorContains(t, err, msg) } else { - assert.Error(t, err) + require.Error(t, err) } } } @@ -1266,15 +1266,15 @@ func TestGetReminder(t *testing.T) { ctx := context.Background() req := createReminderData(actorID, actorType, "reminder1", "1s", "1s", "", "a") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) testReminders.CreateReminder(ctx, reminder) - assert.Equal(t, 1, len(testReminders.reminders[actorType])) + assert.Len(t, testReminders.reminders[actorType], 1) r, err := testReminders.GetReminder(ctx, &internal.GetReminderRequest{ Name: "reminder1", ActorID: actorID, ActorType: actorType, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, json.RawMessage(`"a"`), r.Data) assert.Equal(t, "1s", r.Period.String()) assert.Equal(t, "1s", r.DueTime) @@ -1291,9 +1291,9 @@ func TestReminderFires(t *testing.T) { ctx := context.Background() req := createReminderData(actorID, actorType, "reminder1", "100ms", "100ms", "", "a") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) advanceTickers(t, clock, time.Millisecond*101) @@ -1318,12 +1318,12 @@ func TestReminderDueDate(t *testing.T) { actorKey := constructCompositeKey(actorType, actorID) req := createReminderData(actorID, actorType, "reminder1", "100ms", "500ms", "", "a") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) track, err := testReminders.getReminderTrack(context.Background(), constructCompositeKey(actorKey, "reminder1")) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, track.LastFiredTime) advanceTickers(t, clock, time.Millisecond*500) @@ -1356,7 +1356,7 @@ func TestReminderPeriod(t *testing.T) { 
Data: json.RawMessage(`"a"`), } reminder, errRem := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, errRem) + require.NoError(t, errRem) require.NoError(t, testReminders.CreateReminder(ctx, reminder)) advanceTickers(t, clock, 0) @@ -1400,9 +1400,9 @@ func TestReminderFiresOnceWithEmptyPeriod(t *testing.T) { actorKey := constructCompositeKey(actorType, actorID) req := createReminderData(actorID, actorType, "reminder1", "", "100ms", "", "a") reminder, err := req.NewReminder(testReminders.clock.Now()) - assert.NoError(t, err) + require.NoError(t, err) err = testReminders.CreateReminder(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) clock.Step(100 * time.Millisecond) diff --git a/pkg/actors/timers/timers_test.go b/pkg/actors/timers/timers_test.go index 6e912c0350f..85f9cb54cff 100644 --- a/pkg/actors/timers/timers_test.go +++ b/pkg/actors/timers/timers_test.go @@ -174,12 +174,12 @@ func TestDeleteTimer(t *testing.T) { reminder, err := req.NewReminder(testTimers.clock.Now()) require.NoError(t, err) err = testTimers.CreateTimer(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, int64(1), testTimers.GetActiveTimersCount(actorType)) err = testTimers.DeleteTimer(ctx, req.Key()) - assert.NoError(t, err) + require.NoError(t, err) assert.Eventuallyf(t, func() bool { @@ -415,7 +415,7 @@ func TestTimerRepeats(t *testing.T) { } reminder, err := req.NewReminder(testTimers.clock.Now()) if test.expRepeats == 0 { - assert.ErrorContains(t, err, "has zero repetitions") + require.ErrorContains(t, err, "has zero repetitions") return } require.NoError(t, err) @@ -424,7 +424,7 @@ func TestTimerRepeats(t *testing.T) { t.Cleanup(cancel) err = testTimers.CreateTimer(ctx, reminder) - assert.NoError(t, err) + require.NoError(t, err) count := 0 @@ -505,7 +505,7 @@ func TestTimerTTL(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - assert.NoError(t, testTimers.CreateTimer(ctx, reminder)) + require.NoError(t, testTimers.CreateTimer(ctx, reminder)) count := 0 @@ -560,7 +560,7 @@ func timerValidation(dueTime, period, ttl, msg string) func(t *testing.T) { if err == nil { err = testTimers.CreateTimer(context.Background(), reminder) } - assert.ErrorContains(t, err, msg) + require.ErrorContains(t, err, msg) } } @@ -699,7 +699,7 @@ func TestTimerCounter(t *testing.T) { Data: json.RawMessage(`"testTimer"`), }) err := provider.CreateTimer(context.Background(), timer) - assert.NoError(t, err) + require.NoError(t, err) }(i) } @@ -716,7 +716,7 @@ func TestTimerCounter(t *testing.T) { Data: json.RawMessage(`"testTimer"`), }) err := provider.CreateTimer(context.Background(), timer) - assert.NoError(t, err) + require.NoError(t, err) }(i) } @@ -756,7 +756,7 @@ func TestTimerCounter(t *testing.T) { Name: fmt.Sprintf("positiveTimer%d", idx), } err := provider.DeleteTimer(context.Background(), timer.Key()) - assert.NoError(t, err) + require.NoError(t, err) }(i) } diff --git a/pkg/apis/configuration/v1alpha1/types.go b/pkg/apis/configuration/v1alpha1/types.go index 5f3e4680648..087bc1fba44 100644 --- a/pkg/apis/configuration/v1alpha1/types.go +++ b/pkg/apis/configuration/v1alpha1/types.go @@ -198,9 +198,9 @@ type TracingSpec struct { // OtelSpec defines Otel exporter configurations. 
type OtelSpec struct { - Protocol string `json:"protocol" yaml:"protocol"` + Protocol string `json:"protocol" yaml:"protocol"` EndpointAddress string `json:"endpointAddress" yaml:"endpointAddress"` - IsSecure *bool `json:"isSecure" yaml:"isSecure"` + IsSecure *bool `json:"isSecure" yaml:"isSecure"` } // ZipkinSpec defines Zipkin trace configurations. @@ -242,7 +242,7 @@ type AppPolicySpec struct { // AppOperationAction defines the data structure for each app operation. type AppOperationAction struct { - Operation string `json:"name" yaml:"name"` + Operation string `json:"name" yaml:"name"` Action string `json:"action" yaml:"action"` // +optional HTTPVerb []string `json:"httpVerb,omitempty" yaml:"httpVerb,omitempty"` @@ -260,7 +260,7 @@ type AccessControlSpec struct { // FeatureSpec defines the features that are enabled/disabled. type FeatureSpec struct { - Name string `json:"name" yaml:"name"` + Name string `json:"name" yaml:"name"` Enabled *bool `json:"enabled" yaml:"enabled"` } diff --git a/pkg/apis/resiliency/v1alpha1/types.go b/pkg/apis/resiliency/v1alpha1/types.go index 9115417e1c4..3431d95bf56 100644 --- a/pkg/apis/resiliency/v1alpha1/types.go +++ b/pkg/apis/resiliency/v1alpha1/types.go @@ -41,58 +41,58 @@ func (r Resiliency) String() string { type ResiliencySpec struct { Policies Policies `json:"policies"` - Targets Targets `json:"targets" yaml:"targets"` + Targets Targets `json:"targets" yaml:"targets"` } type Policies struct { - Timeouts map[string]string `json:"timeouts,omitempty" yaml:"timeouts,omitempty"` - Retries map[string]Retry `json:"retries,omitempty" yaml:"retries,omitempty"` + Timeouts map[string]string `json:"timeouts,omitempty" yaml:"timeouts,omitempty"` + Retries map[string]Retry `json:"retries,omitempty" yaml:"retries,omitempty"` CircuitBreakers map[string]CircuitBreaker `json:"circuitBreakers,omitempty" yaml:"circuitBreakers,omitempty"` } type Retry struct { - Policy string `json:"policy,omitempty" yaml:"policy,omitempty"` - Duration string `json:"duration,omitempty" yaml:"duration,omitempty"` + Policy string `json:"policy,omitempty" yaml:"policy,omitempty"` + Duration string `json:"duration,omitempty" yaml:"duration,omitempty"` MaxInterval string `json:"maxInterval,omitempty" yaml:"maxInterval,omitempty"` - MaxRetries *int `json:"maxRetries,omitempty" yaml:"maxRetries,omitempty"` + MaxRetries *int `json:"maxRetries,omitempty" yaml:"maxRetries,omitempty"` } type CircuitBreaker struct { MaxRequests int `json:"maxRequests,omitempty" yaml:"maxRequests,omitempty"` - Interval string `json:"interval,omitempty" yaml:"interval,omitempty"` - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Trip string `json:"trip,omitempty" yaml:"trip,omitempty"` + Interval string `json:"interval,omitempty" yaml:"interval,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Trip string `json:"trip,omitempty" yaml:"trip,omitempty"` } type Targets struct { - Apps map[string]EndpointPolicyNames `json:"apps,omitempty" yaml:"apps,omitempty"` - Actors map[string]ActorPolicyNames `json:"actors,omitempty" yaml:"actors,omitempty"` + Apps map[string]EndpointPolicyNames `json:"apps,omitempty" yaml:"apps,omitempty"` + Actors map[string]ActorPolicyNames `json:"actors,omitempty" yaml:"actors,omitempty"` Components map[string]ComponentPolicyNames `json:"components,omitempty" yaml:"components,omitempty"` } type ComponentPolicyNames struct { - Inbound PolicyNames `json:"inbound,omitempty" yaml:"inbound,omitempty"` + Inbound PolicyNames 
`json:"inbound,omitempty" yaml:"inbound,omitempty"` Outbound PolicyNames `json:"outbound,omitempty" yaml:"outbound,omitempty"` } type PolicyNames struct { - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` } type EndpointPolicyNames struct { - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` - CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` + CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` CircuitBreakerCacheSize int `json:"circuitBreakerCacheSize,omitempty" yaml:"circuitBreakerCacheSize,omitempty"` } type ActorPolicyNames struct { - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` - CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` - CircuitBreakerScope string `json:"circuitBreakerScope,omitempty" yaml:"circuitBreakerScope,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` + CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` + CircuitBreakerScope string `json:"circuitBreakerScope,omitempty" yaml:"circuitBreakerScope,omitempty"` CircuitBreakerCacheSize int `json:"circuitBreakerCacheSize,omitempty" yaml:"circuitBreakerCacheSize,omitempty"` } diff --git a/pkg/apphealth/health_test.go b/pkg/apphealth/health_test.go index 9e8f119be4b..0599da4e548 100644 --- a/pkg/apphealth/health_test.go +++ b/pkg/apphealth/health_test.go @@ -188,7 +188,7 @@ func Test_StartProbes(t *testing.T) { go func() { defer close(done) - assert.NoError(t, h.StartProbes(ctx)) + require.NoError(t, h.StartProbes(ctx)) }() // Wait for ticker to start, @@ -244,7 +244,7 @@ func Test_StartProbes(t *testing.T) { done := make(chan struct{}) go func() { defer close(done) - assert.NoError(t, h.StartProbes(ctx)) + require.NoError(t, h.StartProbes(ctx)) }() // Wait for ticker to start, @@ -279,7 +279,7 @@ func Test_StartProbes(t *testing.T) { done := make(chan struct{}) go func() { defer close(done) - assert.NoError(t, h.StartProbes(ctx)) + require.NoError(t, h.StartProbes(ctx)) }() h.OnHealthChange(func(ctx context.Context, status uint8) { diff --git a/pkg/channel/grpc/grpc_channel.go b/pkg/channel/grpc/grpc_channel.go index de9ebf88f37..01eb8605ba2 100644 --- a/pkg/channel/grpc/grpc_channel.go +++ b/pkg/channel/grpc/grpc_channel.go @@ -98,7 +98,7 @@ func (g *Channel) invokeMethodV1(ctx context.Context, req *invokev1.InvokeMethod return nil, err } - md := invokev1.InternalMetadataToGrpcMetadata(ctx, pd.Metadata, true) + md := invokev1.InternalMetadataToGrpcMetadata(ctx, pd.GetMetadata(), true) if g.appMetadataToken != "" { md.Set(securityConsts.APITokenHeader, g.appMetadataToken) @@ -116,7 +116,7 @@ func (g *Channel) invokeMethodV1(ctx context.Context, req *invokev1.InvokeMethod grpc.MaxCallRecvMsgSize(g.maxRequestBodySizeMB << 20), } - resp, err := g.appCallbackClient.OnInvoke(ctx, pd.Message, opts...) 
+ resp, err := g.appCallbackClient.OnInvoke(ctx, pd.GetMessage(), opts...) if g.ch != nil { <-g.ch @@ -127,7 +127,7 @@ func (g *Channel) invokeMethodV1(ctx context.Context, req *invokev1.InvokeMethod // Convert status code respStatus := status.Convert(err) // Prepare response - rsp = invokev1.NewInvokeMethodResponse(int32(respStatus.Code()), respStatus.Message(), respStatus.Proto().Details) + rsp = invokev1.NewInvokeMethodResponse(int32(respStatus.Code()), respStatus.Message(), respStatus.Proto().GetDetails()) } else { rsp = invokev1.NewInvokeMethodResponse(int32(codes.OK), "", nil) } diff --git a/pkg/channel/grpc/grpc_channel_test.go b/pkg/channel/grpc/grpc_channel_test.go index 223dbe5e90c..97ac5bba616 100644 --- a/pkg/channel/grpc/grpc_channel_test.go +++ b/pkg/channel/grpc/grpc_channel_test.go @@ -106,7 +106,7 @@ func TestInvokeMethod(t *testing.T) { WithHTTPExtension(http.MethodPost, "param1=val1&param2=val2") defer req.Close() response, err := c.InvokeMethod(ctx, req, "") - assert.NoError(t, err) + require.NoError(t, err) defer response.Close() assert.Equal(t, "application/json", response.ContentType()) @@ -129,7 +129,7 @@ func TestInvokeMethod(t *testing.T) { response, err := c.InvokeMethod(ctx, req, "") require.Error(t, err) - assert.ErrorIs(t, err, io.ErrClosedPipe) + require.ErrorIs(t, err, io.ErrClosedPipe) if response != nil { defer response.Close() } @@ -154,19 +154,19 @@ func TestHealthProbe(t *testing.T) { // OK response success, err = c.HealthProbe(ctx) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, success) // Non-2xx status code mockServer.Error = errors.New("test failure") success, err = c.HealthProbe(ctx) - assert.Error(t, err) + require.Error(t, err) assert.False(t, success) // Closed connection closeConnection(t, conn) success, err = c.HealthProbe(ctx) - assert.Error(t, err) + require.Error(t, err) assert.False(t, success) } diff --git a/pkg/channel/http/http_channel.go b/pkg/channel/http/http_channel.go index 057520a7999..959b59d8dee 100644 --- a/pkg/channel/http/http_channel.go +++ b/pkg/channel/http/http_channel.go @@ -120,15 +120,15 @@ func (h *Channel) GetAppConfig(ctx context.Context, appID string) (*config.Appli var config config.ApplicationConfig - if resp.Status().Code != http.StatusOK { + if resp.Status().GetCode() != http.StatusOK { return &config, nil } // Get versioning info, currently only v1 is supported. 
headers := resp.Headers() var version string - if val, ok := headers["dapr-app-config-version"]; ok && len(val.Values) > 0 { - version = val.Values[0] + if val, ok := headers["dapr-app-config-version"]; ok && len(val.GetValues()) > 0 { + version = val.GetValues()[0] } switch version { @@ -152,7 +152,7 @@ func (h *Channel) InvokeMethod(ctx context.Context, req *invokev1.InvokeMethodRe return nil, status.Error(codes.InvalidArgument, "missing HTTP extension field") } // Go's net/http library does not support sending requests with the CONNECT method - if httpExt.Verb == commonv1pb.HTTPExtension_NONE || httpExt.Verb == commonv1pb.HTTPExtension_CONNECT { //nolint:nosnakecase + if httpExt.GetVerb() == commonv1pb.HTTPExtension_NONE || httpExt.GetVerb() == commonv1pb.HTTPExtension_CONNECT { //nolint:nosnakecase return nil, status.Error(codes.InvalidArgument, "invalid HTTP verb") } @@ -228,7 +228,7 @@ func (h *Channel) invokeMethodV1(ctx context.Context, req *invokev1.InvokeMethod }() // Emit metric when request is sent - diag.DefaultHTTPMonitoring.ClientRequestStarted(ctx, int64(len(req.Message().Data.GetValue()))) + diag.DefaultHTTPMonitoring.ClientRequestStarted(ctx, int64(len(req.Message().GetData().GetValue()))) startRequest := time.Now() var resp *http.Response @@ -280,7 +280,7 @@ func (h *Channel) invokeMethodV1(ctx context.Context, req *invokev1.InvokeMethod return nil, err } - diag.DefaultHTTPMonitoring.ClientRequestCompleted(ctx, strconv.Itoa(int(rsp.Status().Code)), contentLength, elapsedMs) + diag.DefaultHTTPMonitoring.ClientRequestCompleted(ctx, strconv.Itoa(int(rsp.Status().GetCode())), contentLength, elapsedMs) return rsp, nil } @@ -288,8 +288,8 @@ func (h *Channel) invokeMethodV1(ctx context.Context, req *invokev1.InvokeMethod func (h *Channel) constructRequest(ctx context.Context, req *invokev1.InvokeMethodRequest, appID string) (*http.Request, error) { // Construct app channel URI: VERB http://localhost:3000/method?query1=value1 msg := req.Message() - verb := msg.HttpExtension.Verb.String() - method := msg.Method + verb := msg.GetHttpExtension().GetVerb().String() + method := msg.GetMethod() var headers []commonapi.NameValuePair uri := strings.Builder{} diff --git a/pkg/channel/http/http_channel_test.go b/pkg/channel/http/http_channel_test.go index 4836f63f292..46af474c1b7 100644 --- a/pkg/channel/http/http_channel_test.go +++ b/pkg/channel/http/http_channel_test.go @@ -174,7 +174,7 @@ func TestInvokeMethodMiddlewaresPipeline(t *testing.T) { require.NoError(t, err) defer resp.Close() assert.Equal(t, 1, called) - assert.Equal(t, int32(http.StatusOK), resp.Status().Code) + assert.Equal(t, int32(http.StatusOK), resp.Status().GetCode()) }) t.Run("request can be short-circuited by middleware pipeline", func(t *testing.T) { @@ -207,7 +207,7 @@ func TestInvokeMethodMiddlewaresPipeline(t *testing.T) { require.NoError(t, err) defer resp.Close() assert.Equal(t, 1, called) - assert.Equal(t, int32(http.StatusBadGateway), resp.Status().Code) + assert.Equal(t, int32(http.StatusBadGateway), resp.Status().GetCode()) }) server.Close() @@ -239,7 +239,7 @@ func TestInvokeMethodMiddlewaresPipeline(t *testing.T) { require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() - require.Equal(t, int32(http.StatusOK), resp.Status().Code) + require.Equal(t, int32(http.StatusOK), resp.Status().GetCode()) assert.Equal(t, "text/plain", resp.ContentType()) assert.Equal(t, "M'ILLUMINO D'IMMENSO", string(body)) }) @@ -271,7 +271,7 @@ func TestInvokeMethodMiddlewaresPipeline(t *testing.T) { 
require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() - require.Equal(t, int32(http.StatusOK), resp.Status().Code) + require.Equal(t, int32(http.StatusOK), resp.Status().GetCode()) assert.Equal(t, "text/plain", resp.ContentType()) assert.Equal(t, "true", string(body)) }) @@ -303,7 +303,7 @@ func TestInvokeMethodMiddlewaresPipeline(t *testing.T) { require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() - require.Equal(t, int32(http.StatusOK), resp.Status().Code) + require.Equal(t, int32(http.StatusOK), resp.Status().GetCode()) assert.Equal(t, "text/plain", resp.ContentType()) assert.Equal(t, "FALSE", string(body)) }) @@ -336,7 +336,7 @@ func TestInvokeMethodMiddlewaresPipeline(t *testing.T) { require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() - require.Equal(t, int32(http.StatusOK), resp.Status().Code) + require.Equal(t, int32(http.StatusOK), resp.Status().GetCode()) assert.Equal(t, "text/plain", resp.ContentType()) assert.Equal(t, "TRUE", string(body)) }) @@ -366,14 +366,14 @@ func TestInvokeMethodHeaders(t *testing.T) { resp, err := c.InvokeMethod(ctx, fakeReq, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() headers := map[string][]string{} err = json.NewDecoder(resp.RawData()).Decode(&headers) require.NoError(t, err) require.Len(t, headers["Content-Type"], 1) - assert.Equal(t, headers["Content-Type"][0], "test/dapr") + assert.Equal(t, "test/dapr", headers["Content-Type"][0]) }) t.Run("content-type is omitted when empty", func(t *testing.T) { @@ -394,7 +394,7 @@ func TestInvokeMethodHeaders(t *testing.T) { resp, err := c.InvokeMethod(ctx, fakeReq, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() headers := map[string][]string{} @@ -428,7 +428,7 @@ func TestInvokeMethod(t *testing.T) { resp, err := c.InvokeMethod(ctx, fakeReq, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() assert.Equal(t, "param1=val1&param2=val2", string(body)) @@ -452,7 +452,7 @@ func TestInvokeMethod(t *testing.T) { resp, err := c.InvokeMethod(ctx, fakeReq, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() assert.Equal(t, "", string(body)) @@ -484,7 +484,7 @@ func TestInvokeMethodMaxConcurrency(t *testing.T) { WithHTTPExtension("GET", "") defer req.Close() resp, err := c.InvokeMethod(ctx, req, "") - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() wg.Done() }() @@ -519,7 +519,7 @@ func TestInvokeMethodMaxConcurrency(t *testing.T) { WithHTTPExtension("GET", "") defer req.Close() resp, err := c.InvokeMethod(ctx, req, "") - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() wg.Done() }() @@ -559,9 +559,9 @@ func TestInvokeMethodMaxConcurrency(t *testing.T) { defer resp.Close() } if i < 10 { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } } @@ -592,14 +592,14 @@ func TestInvokeWithHeaders(t *testing.T) { resp, err := c.InvokeMethod(ctx, req, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() actual := map[string]string{} json.Unmarshal(body, &actual) - assert.NoError(t, err) + require.NoError(t, err) assert.Contains(t, "v1", actual["H1"]) assert.Contains(t, "v2", actual["H2"]) testServer.Close() @@ -624,7 +624,7 @@ func TestContentType(t *testing.T) { resp, err := c.InvokeMethod(ctx, req, "") // assert - 
assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() assert.Equal(t, "", resp.ContentType()) @@ -649,7 +649,7 @@ func TestContentType(t *testing.T) { resp, err := c.InvokeMethod(ctx, req, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() assert.Equal(t, "text/plain; charset=utf-8", resp.ContentType()) @@ -674,7 +674,7 @@ func TestContentType(t *testing.T) { resp, err := c.InvokeMethod(ctx, req, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() assert.Equal(t, "text/plain; charset=utf-8", resp.ContentType()) @@ -704,13 +704,13 @@ func TestContentLength(t *testing.T) { resp, err := c.InvokeMethod(ctx, req, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() actual := map[string]string{} json.Unmarshal(body, &actual) _, hasContentLength := actual["Content-Length"] - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, hasContentLength) testServer.Close() } @@ -734,7 +734,7 @@ func TestAppToken(t *testing.T) { resp, err := c.InvokeMethod(ctx, req, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() @@ -742,7 +742,7 @@ func TestAppToken(t *testing.T) { json.Unmarshal(body, &actual) _, hasToken := actual["Dapr-Api-Token"] - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, hasToken) testServer.Close() }) @@ -764,7 +764,7 @@ func TestAppToken(t *testing.T) { resp, err := c.InvokeMethod(ctx, req, "") // assert - assert.NoError(t, err) + require.NoError(t, err) defer resp.Close() body, _ := resp.RawDataFull() @@ -772,7 +772,7 @@ func TestAppToken(t *testing.T) { json.Unmarshal(body, &actual) _, hasToken := actual["Dapr-Api-Token"] - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, hasToken) testServer.Close() }) @@ -795,19 +795,19 @@ func TestHealthProbe(t *testing.T) { // OK response success, err = c.HealthProbe(ctx) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, success) // Non-2xx status code h.Code = 500 success, err = c.HealthProbe(ctx) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, success) // Stopped server // Should still return no error, but a failed probe testServer.Close() success, err = c.HealthProbe(ctx) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, success) } diff --git a/pkg/channel/testing/grpc_channel_server_mock.go b/pkg/channel/testing/grpc_channel_server_mock.go index c591b3eff62..6c3ca9a07d1 100644 --- a/pkg/channel/testing/grpc_channel_server_mock.go +++ b/pkg/channel/testing/grpc_channel_server_mock.go @@ -54,22 +54,22 @@ func (m *MockServer) Init() { func (m *MockServer) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { md, _ := metadata.FromIncomingContext(ctx) dt := map[string]string{ - "method": in.Method, + "method": in.GetMethod(), } for k, v := range md { dt[k] = v[0] } - dt["httpverb"] = in.HttpExtension.GetVerb().String() - dt["querystring"] = in.HttpExtension.Querystring + dt["httpverb"] = in.GetHttpExtension().GetVerb().String() + dt["querystring"] = in.GetHttpExtension().GetQuerystring() ds, _ := json.Marshal(dt) return &commonv1pb.InvokeResponse{Data: &anypb.Any{Value: ds}, ContentType: "application/json"}, m.Error } func (m *MockServer) ListTopicSubscriptions(ctx context.Context, in *emptypb.Empty) 
(*runtimev1pb.ListTopicSubscriptionsResponse, error) { - if m.ListTopicSubscriptionsResponse.Subscriptions != nil { + if m.ListTopicSubscriptionsResponse.GetSubscriptions() != nil { return m.ListTopicSubscriptionsResponse, m.Error } return &runtimev1pb.ListTopicSubscriptionsResponse{ @@ -88,7 +88,7 @@ func (m *MockServer) OnBindingEvent(ctx context.Context, in *runtimev1pb.Binding } func (m *MockServer) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventRequest) (*runtimev1pb.TopicEventResponse, error) { - jsonBytes, marshalErr := in.Extensions.MarshalJSON() + jsonBytes, marshalErr := in.GetExtensions().MarshalJSON() if marshalErr != nil { return nil, marshalErr } @@ -117,9 +117,9 @@ func (m *MockServer) OnBulkTopicEventAlpha1(ctx context.Context, in *runtimev1pb if !m.initialized { m.Init() } - m.RequestsReceived[in.Path] = in + m.RequestsReceived[in.GetPath()] = in if m.BulkResponsePerPath != nil { - return m.BulkResponsePerPath[in.Path], m.Error + return m.BulkResponsePerPath[in.GetPath()], m.Error } return nil, m.Error } diff --git a/pkg/components/bindings/input_pluggable.go b/pkg/components/bindings/input_pluggable.go index 514b9fb0227..9a62fd2b6cc 100644 --- a/pkg/components/bindings/input_pluggable.go +++ b/pkg/components/bindings/input_pluggable.go @@ -62,19 +62,19 @@ func (b *grpcInputBinding) adaptHandler(ctx context.Context, streamingPull proto safeSend := &sync.Mutex{} return func(msg *proto.ReadResponse) { var contentType *string - if len(msg.ContentType) != 0 { + if len(msg.GetContentType()) != 0 { contentType = &msg.ContentType } m := bindings.ReadResponse{ - Data: msg.Data, - Metadata: msg.Metadata, + Data: msg.GetData(), + Metadata: msg.GetMetadata(), ContentType: contentType, } var respErr *proto.AckResponseError bts, err := handler(ctx, &m) if err != nil { - b.logger.Errorf("error when handling message for message: %s", msg.MessageId) + b.logger.Errorf("error when handling message for message: %s", msg.GetMessageId()) respErr = &proto.AckResponseError{ Message: err.Error(), } @@ -92,9 +92,9 @@ func (b *grpcInputBinding) adaptHandler(ctx context.Context, streamingPull proto if err := streamingPull.Send(&proto.ReadRequest{ ResponseData: bts, ResponseError: respErr, - MessageId: msg.MessageId, + MessageId: msg.GetMessageId(), }); err != nil { - b.logger.Errorf("error when ack'ing message %s", msg.MessageId) + b.logger.Errorf("error when ack'ing message %s", msg.GetMessageId()) } } } diff --git a/pkg/components/bindings/input_pluggable_test.go b/pkg/components/bindings/input_pluggable_test.go index b8d94ffdbff..4ab76d3ff7a 100644 --- a/pkg/components/bindings/input_pluggable_test.go +++ b/pkg/components/bindings/input_pluggable_test.go @@ -166,7 +166,7 @@ func TestInputBindingCalls(t *testing.T) { readResponseChan: messageChan, onReadRequestReceived: func(ma *proto.ReadRequest) { messagesAcked.Done() - if ma.ResponseError != nil { + if ma.GetResponseError() != nil { totalResponseErrors.Add(1) } }, diff --git a/pkg/components/bindings/output_pluggable.go b/pkg/components/bindings/output_pluggable.go index 03ecc0f3a1e..1450e4c3e41 100644 --- a/pkg/components/bindings/output_pluggable.go +++ b/pkg/components/bindings/output_pluggable.go @@ -79,13 +79,13 @@ func (b *grpcOutputBinding) Invoke(ctx context.Context, req *bindings.InvokeRequ } var contentType *string - if len(resp.ContentType) != 0 { + if len(resp.GetContentType()) != 0 { contentType = &resp.ContentType } return &bindings.InvokeResponse{ - Data: resp.Data, - Metadata: resp.Metadata, + Data: resp.GetData(), 
+ Metadata: resp.GetMetadata(), ContentType: contentType, }, nil } diff --git a/pkg/components/bindings/output_pluggable_test.go b/pkg/components/bindings/output_pluggable_test.go index 637abd55e20..6b8f3ae3d64 100644 --- a/pkg/components/bindings/output_pluggable_test.go +++ b/pkg/components/bindings/output_pluggable_test.go @@ -163,7 +163,7 @@ func TestOutputBindingCalls(t *testing.T) { Metadata: fakeMetadata, }, onInvokeCalled: func(ir *proto.InvokeRequest) { - assert.Equal(t, ir.Operation, fakeOp) + assert.Equal(t, fakeOp, ir.GetOperation()) }, } @@ -196,7 +196,7 @@ func TestOutputBindingCalls(t *testing.T) { _, err = outputSvc.Invoke(context.Background(), &bindings.InvokeRequest{}) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), srv.invokeCalled.Load()) }) } diff --git a/pkg/components/bindings/registry_test.go b/pkg/components/bindings/registry_test.go index a95fd24c899..137947d1b24 100644 --- a/pkg/components/bindings/registry_test.go +++ b/pkg/components/bindings/registry_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" b "github.com/dapr/components-contrib/bindings" "github.com/dapr/dapr/pkg/components/bindings" @@ -60,21 +61,21 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 assert.True(t, testRegistry.HasInputBinding(componentName, "v0")) p, e := testRegistry.CreateInputBinding(componentName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockInput, p) p, e = testRegistry.CreateInputBinding(componentName, "v1", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockInput, p) // assert v2 assert.True(t, testRegistry.HasInputBinding(componentName, "v2")) pV2, e := testRegistry.CreateInputBinding(componentName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockInputV2, pV2) // check case-insensitivity pV2, e = testRegistry.CreateInputBinding(strings.ToUpper(componentName), "V2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockInputV2, pV2) }) @@ -118,17 +119,17 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 assert.True(t, testRegistry.HasOutputBinding(componentName, "v0")) p, e := testRegistry.CreateOutputBinding(componentName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockOutput, p) assert.True(t, testRegistry.HasOutputBinding(componentName, "v1")) p, e = testRegistry.CreateOutputBinding(componentName, "v1", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockOutput, p) // assert v2 assert.True(t, testRegistry.HasOutputBinding(componentName, "v2")) pV2, e := testRegistry.CreateOutputBinding(componentName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockOutputV2, pV2) }) diff --git a/pkg/components/configuration/registry_test.go b/pkg/components/configuration/registry_test.go index 416bd2a161b..0a3c31d6ab8 100644 --- a/pkg/components/configuration/registry_test.go +++ b/pkg/components/configuration/registry_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" c "github.com/dapr/components-contrib/configuration" "github.com/dapr/dapr/pkg/components/configuration" @@ -53,20 +54,20 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 p, e := testRegistry.Create(componentName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) p, e = testRegistry.Create(componentName, "v1", "") - assert.NoError(t, e) + 
require.NoError(t, e) assert.Same(t, mock, p) // assert v2 pV2, e := testRegistry.Create(componentName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) // check case-insensitivity pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) }) diff --git a/pkg/components/crypto/registry_test.go b/pkg/components/crypto/registry_test.go index 5eee2d38d9d..6c94b997cc6 100644 --- a/pkg/components/crypto/registry_test.go +++ b/pkg/components/crypto/registry_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cp "github.com/dapr/components-contrib/crypto" "github.com/dapr/dapr/pkg/components/crypto" @@ -53,20 +54,20 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 p, e := testRegistry.Create(componentName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) p, e = testRegistry.Create(componentName, "v1", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) // assert v2 pV2, e := testRegistry.Create(componentName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) // check case-insensitivity pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) }) diff --git a/pkg/components/disk_manifest_loader_test.go b/pkg/components/disk_manifest_loader_test.go index 1b1d647f361..6a36338fc72 100644 --- a/pkg/components/disk_manifest_loader_test.go +++ b/pkg/components/disk_manifest_loader_test.go @@ -81,7 +81,7 @@ spec: badStructure: "So please ignore me" ` components, errs := request.decodeYaml([]byte(yaml)) - assert.Len(t, components, 0) + assert.Empty(t, components) assert.Len(t, errs, 1) } @@ -90,7 +90,7 @@ func TestDiskManifestLoaderDecodeUnsuspectingFile(t *testing.T) { components, errs := request.decodeYaml([]byte("hey there")) assert.Len(t, errs, 1) - assert.Len(t, components, 0) + assert.Empty(t, components) } func TesSDiskManifesLoadertDecodeInvalidYaml(t *testing.T) { @@ -104,7 +104,7 @@ metadata: name: statestore` components, errs := request.decodeYaml([]byte(yaml)) assert.Len(t, errs, 1) - assert.Len(t, components, 0) + assert.Empty(t, components) } func TestDiskManifestLoaderDecodeValidMultiYaml(t *testing.T) { diff --git a/pkg/components/kubernetes_loader_test.go b/pkg/components/kubernetes_loader_test.go index 1a03c5723f2..760b87a97a3 100644 --- a/pkg/components/kubernetes_loader_test.go +++ b/pkg/components/kubernetes_loader_test.go @@ -10,6 +10,7 @@ import ( "github.com/phayes/freeport" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -31,7 +32,7 @@ func (o *mockOperator) ListComponents(ctx context.Context, in *operatorv1pb.List component := v1alpha1.Component{} component.ObjectMeta.Name = "test" component.ObjectMeta.Labels = map[string]string{ - "podName": in.PodName, + "podName": in.GetPodName(), } component.Spec = v1alpha1.ComponentSpec{ Type: "testtype", @@ -70,7 +71,7 @@ func getOperatorClient(address string) operatorv1pb.OperatorClient { func TestLoadComponents(t *testing.T) { port, _ := freeport.GetFreePort() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - assert.NoError(t, err) + require.NoError(t, err) s := grpc.NewServer() operatorv1pb.RegisterOperatorServer(s, &mockOperator{}) @@ -91,7 +92,7 @@ func 
TestLoadComponents(t *testing.T) { } response, err := request.LoadComponents() - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, response) assert.Equal(t, "test", response[0].Name) assert.Equal(t, "testtype", response[0].Spec.Type) diff --git a/pkg/components/local_loader_test.go b/pkg/components/local_loader_test.go index d87dcdbd5a7..8869ac2f0ac 100644 --- a/pkg/components/local_loader_test.go +++ b/pkg/components/local_loader_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const configPrefix = "." @@ -50,10 +51,10 @@ spec: value: value2 ` remove, err := writeTempConfig(filename, yaml) - assert.Nil(t, err) + require.NoError(t, err) defer remove() components, err := request.LoadComponents() - assert.Nil(t, err) + require.NoError(t, err) assert.Len(t, components, 1) }) @@ -68,18 +69,18 @@ kind: Component metadata: name: statestore` remove, err := writeTempConfig(filename, yaml) - assert.Nil(t, err) + require.NoError(t, err) defer remove() components, err := request.LoadComponents() - assert.Nil(t, err) - assert.Len(t, components, 0) + require.NoError(t, err) + assert.Empty(t, components) }) t.Run("load components file not exist", func(t *testing.T) { request := NewLocalComponents("test-path-no-exists") components, err := request.LoadComponents() - assert.NotNil(t, err) - assert.Len(t, components, 0) + require.Error(t, err) + assert.Empty(t, components) }) } diff --git a/pkg/components/lock/lock_config_test.go b/pkg/components/lock/lock_config_test.go index 99d15e51800..a0c1586d1ee 100644 --- a/pkg/components/lock/lock_config_test.go +++ b/pkg/components/lock/lock_config_test.go @@ -56,9 +56,9 @@ func TestGetModifiedLockKey(t *testing.T) { err := SaveLockConfiguration(item.storename, map[string]string{ strategyKey: item.prefix, }) - require.Nil(t, err) + require.NoError(t, err) _, err = GetModifiedLockKey(item.key, item.storename, "") - require.NotNil(t, err) + require.Error(t, err) } } diff --git a/pkg/components/lock/registry_test.go b/pkg/components/lock/registry_test.go index 2d97a5e5a8c..3c227a69fe9 100644 --- a/pkg/components/lock/registry_test.go +++ b/pkg/components/lock/registry_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/dapr/components-contrib/lock" "github.com/dapr/kit/logger" @@ -42,5 +42,5 @@ func TestAliasing(t *testing.T) { return nil }, "", alias) _, err := r.Create("lock."+alias, "", "") - assert.Nil(t, err) + require.NoError(t, err) } diff --git a/pkg/components/middleware/http/registry_test.go b/pkg/components/middleware/http/registry_test.go index fc687fc8b4f..7cdeeb34b7f 100644 --- a/pkg/components/middleware/http/registry_test.go +++ b/pkg/components/middleware/http/registry_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" h "github.com/dapr/components-contrib/middleware" "github.com/dapr/dapr/pkg/components/middleware/http" @@ -66,21 +67,21 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 p, e := testRegistry.Create(componentName, "v0", metadata, "") - assert.NoError(t, e) - assert.True(t, reflect.ValueOf(mock) == reflect.ValueOf(p)) + require.NoError(t, e) + assert.Equal(t, reflect.ValueOf(mock), reflect.ValueOf(p)) p, e = testRegistry.Create(componentName, "v1", metadata, "") - assert.NoError(t, e) - assert.True(t, reflect.ValueOf(mock) == reflect.ValueOf(p)) + require.NoError(t, e) + 
assert.Equal(t, reflect.ValueOf(mock), reflect.ValueOf(p)) // assert v2 pV2, e := testRegistry.Create(componentName, "v2", metadata, "") - assert.NoError(t, e) - assert.True(t, reflect.ValueOf(mockV2) == reflect.ValueOf(pV2)) + require.NoError(t, e) + assert.Equal(t, reflect.ValueOf(mockV2), reflect.ValueOf(pV2)) // check case-insensitivity pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", metadata, "") - assert.NoError(t, e) - assert.True(t, reflect.ValueOf(mockV2) == reflect.ValueOf(pV2)) + require.NoError(t, e) + assert.Equal(t, reflect.ValueOf(mockV2), reflect.ValueOf(pV2)) }) t.Run("middleware is not registered", func(t *testing.T) { diff --git a/pkg/components/nameresolution/registry_test.go b/pkg/components/nameresolution/registry_test.go index b211478fa19..72419e5e533 100644 --- a/pkg/components/nameresolution/registry_test.go +++ b/pkg/components/nameresolution/registry_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" nr "github.com/dapr/components-contrib/nameresolution" "github.com/dapr/dapr/pkg/components/nameresolution" @@ -52,20 +53,20 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 p, e := testRegistry.Create(resolverName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) p, e = testRegistry.Create(resolverName, "v1", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) // assert v2 pV2, e := testRegistry.Create(resolverName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) // check case-insensitivity pV2, e = testRegistry.Create(strings.ToUpper(resolverName), "V2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) }) diff --git a/pkg/components/pluggable/discovery_test.go b/pkg/components/pluggable/discovery_test.go index d7618a7e9b6..8c9352c0296 100644 --- a/pkg/components/pluggable/discovery_test.go +++ b/pkg/components/pluggable/discovery_test.go @@ -57,7 +57,7 @@ func TestServiceCallback(t *testing.T) { called := 0 AddServiceDiscoveryCallback(fakeServiceName, func(name string, _ GRPCConnectionDialer) { called++ - assert.Equal(t, name, fakeComponentName) + assert.Equal(t, fakeComponentName, name) }) callback([]service{{protoRef: fakeServiceName, componentName: fakeComponentName}}) assert.Equal(t, 1, called) @@ -81,7 +81,7 @@ func TestConnectionCloser(t *testing.T) { closer := reflectServiceConnectionCloser(fakeCloser, fakeService) closer() assert.Len(t, callOrder, 2) - assert.Equal(t, callOrder, []string{reset, close}) + assert.Equal(t, []string{reset, close}, callOrder) }) } @@ -132,7 +132,7 @@ func TestComponentDiscovery(t *testing.T) { _, err = serviceDiscovery(func(string) (reflectServiceClient, func(), error) { return nil, nil, errors.New("fake-err") }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(0), reflectService.listServicesCalled.Load()) }) t.Run("serviceDiscovery should return an error when list services return an error", func(t *testing.T) { @@ -154,7 +154,7 @@ func TestComponentDiscovery(t *testing.T) { _, err = serviceDiscovery(func(string) (reflectServiceClient, func(), error) { return reflectService, func() {}, nil }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), reflectService.listServicesCalled.Load()) }) t.Run("serviceDiscovery should return all services list", func(t *testing.T) { @@ -190,20 +190,20 @@ func TestComponentDiscovery(t *testing.T) { func TestRemoveExt(t *testing.T) { 
t.Run("remove ext should remove file extension when it has one", func(t *testing.T) { - assert.Equal(t, removeExt("a.sock"), "a") + assert.Equal(t, "a", removeExt("a.sock")) }) t.Run("remove ext should not change file name when it has no extension", func(t *testing.T) { - assert.Equal(t, removeExt("a"), "a") + assert.Equal(t, "a", removeExt("a")) }) } func TestGetSocketFolder(t *testing.T) { t.Run("get socket folder should use default when env var is not set", func(t *testing.T) { - assert.Equal(t, GetSocketFolderPath(), defaultSocketFolder) + assert.Equal(t, defaultSocketFolder, GetSocketFolderPath()) }) t.Run("get socket folder should use env var when set", func(t *testing.T) { const fakeSocketFolder = "/tmp" t.Setenv(SocketFolderEnvVar, fakeSocketFolder) - assert.Equal(t, GetSocketFolderPath(), fakeSocketFolder) + assert.Equal(t, fakeSocketFolder, GetSocketFolderPath()) }) } diff --git a/pkg/components/pluggable/errors_test.go b/pkg/components/pluggable/errors_test.go index 9939e09af83..4240b06df51 100644 --- a/pkg/components/pluggable/errors_test.go +++ b/pkg/components/pluggable/errors_test.go @@ -37,7 +37,7 @@ func TestComposeErrorsConverters(t *testing.T) { }) composed := outer.Compose(inner) err := composed(*status.New(codes.Unknown, "")) - require.NotNil(t, err) + require.Error(t, err) assert.Equal(t, 0, outerCalled) assert.Equal(t, 1, innerCalled) }) @@ -54,7 +54,7 @@ func TestComposeErrorsConverters(t *testing.T) { }) composed := outer.Compose(inner) err := composed(*status.New(codes.Unknown, "")) - require.NotNil(t, err) + require.Error(t, err) assert.Equal(t, 1, outerCalled) assert.Equal(t, 1, innerCalled) }) @@ -82,7 +82,7 @@ func TestErrorsMerge(t *testing.T) { f, ok := merged[codes.Canceled] assert.True(t, ok) err := f(*status.New(codes.Unknown, "")) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, 1, innerCalled) assert.Equal(t, 1, outerCalled) }) diff --git a/pkg/components/pubsub/pluggable.go b/pkg/components/pubsub/pluggable.go index 9940bdf8ede..dbc6bfe61e0 100644 --- a/pkg/components/pubsub/pluggable.go +++ b/pkg/components/pubsub/pluggable.go @@ -59,8 +59,8 @@ func (p *grpcPubSub) Init(ctx context.Context, metadata pubsub.Metadata) error { return err } - p.features = make([]pubsub.Feature, len(featureResponse.Features)) - for idx, f := range featureResponse.Features { + p.features = make([]pubsub.Feature, len(featureResponse.GetFeatures())) + for idx, f := range featureResponse.GetFeatures() { p.features[idx] = pubsub.Feature(f) } @@ -103,11 +103,11 @@ func (p *grpcPubSub) BulkPublish(ctx context.Context, req *pubsub.BulkPublishReq return pubsub.BulkPublishResponse{}, err } - failedEntries := make([]pubsub.BulkPublishResponseFailedEntry, len(response.FailedEntries)) - for i, failedEntry := range response.FailedEntries { + failedEntries := make([]pubsub.BulkPublishResponseFailedEntry, len(response.GetFailedEntries())) + for i, failedEntry := range response.GetFailedEntries() { failedEntries[i] = pubsub.BulkPublishResponseFailedEntry{ - EntryId: failedEntry.EntryId, - Error: errors.New(failedEntry.Error), + EntryId: failedEntry.GetEntryId(), + Error: errors.New(failedEntry.GetError()), } } @@ -123,15 +123,15 @@ func (p *grpcPubSub) adaptHandler(ctx context.Context, streamingPull proto.PubSu safeSend := &sync.Mutex{} return func(msg *proto.PullMessagesResponse) { m := pubsub.NewMessage{ - Data: msg.Data, + Data: msg.GetData(), ContentType: &msg.ContentType, - Topic: msg.TopicName, - Metadata: msg.Metadata, + Topic: msg.GetTopicName(), + Metadata: 
msg.GetMetadata(), } var ackError *proto.AckMessageError if err := handler(ctx, &m); err != nil { - p.logger.Errorf("error when handling message on topic %s", msg.TopicName) + p.logger.Errorf("error when handling message on topic %s", msg.GetTopicName()) ackError = &proto.AckMessageError{ Message: err.Error(), } @@ -147,10 +147,10 @@ func (p *grpcPubSub) adaptHandler(ctx context.Context, streamingPull proto.PubSu defer safeSend.Unlock() if err := streamingPull.Send(&proto.PullMessagesRequest{ - AckMessageId: msg.Id, + AckMessageId: msg.GetId(), AckError: ackError, }); err != nil { - p.logger.Errorf("error when ack'ing message %s from topic %s", msg.Id, msg.TopicName) + p.logger.Errorf("error when ack'ing message %s from topic %s", msg.GetId(), msg.GetTopicName()) } } } @@ -171,7 +171,7 @@ func (p *grpcPubSub) pullMessages(ctx context.Context, topic *proto.Topic, handl cleanup := func() { if closeErr := pull.CloseSend(); closeErr != nil { - p.logger.Warnf("could not close pull stream of topic %s: %v", topic.Name, closeErr) + p.logger.Warnf("could not close pull stream of topic %s: %v", topic.GetName(), closeErr) } cancel() } @@ -196,7 +196,7 @@ func (p *grpcPubSub) pullMessages(ctx context.Context, topic *proto.Topic, handl return } - p.logger.Debugf("received message from stream on topic %s", msg.TopicName) + p.logger.Debugf("received message from stream on topic %s", msg.GetTopicName()) go handle(msg) } diff --git a/pkg/components/pubsub/pluggable_test.go b/pkg/components/pubsub/pluggable_test.go index 5a9dd4c2f6c..79d43d40799 100644 --- a/pkg/components/pubsub/pluggable_test.go +++ b/pkg/components/pubsub/pluggable_test.go @@ -166,7 +166,7 @@ func TestPubSubPluggableCalls(t *testing.T) { assert.Empty(t, ps.Features()) ps.features = []pubsub.Feature{pubsub.FeatureMessageTTL} assert.NotEmpty(t, ps.Features()) - assert.Equal(t, ps.Features()[0], pubsub.FeatureMessageTTL) + assert.Equal(t, pubsub.FeatureMessageTTL, ps.Features()[0]) }) t.Run("publish should call publish grpc method", func(t *testing.T) { @@ -174,7 +174,7 @@ func TestPubSubPluggableCalls(t *testing.T) { svc := &server{ onPublishCalled: func(req *proto.PublishRequest) { - assert.Equal(t, req.Topic, fakeTopic) + assert.Equal(t, fakeTopic, req.GetTopic()) }, } ps, cleanup, err := getPubSub(svc) @@ -194,7 +194,7 @@ func TestPubSubPluggableCalls(t *testing.T) { svc := &server{ onPublishCalled: func(req *proto.PublishRequest) { - assert.Equal(t, req.Topic, fakeTopic) + assert.Equal(t, fakeTopic, req.GetTopic()) }, publishErr: errors.New("fake-publish-err"), } @@ -206,7 +206,7 @@ func TestPubSubPluggableCalls(t *testing.T) { Topic: fakeTopic, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.publishCalled.Load()) }) @@ -246,12 +246,12 @@ func TestPubSubPluggableCalls(t *testing.T) { svc := &server{ pullChan: messageChan, onAckReceived: func(ma *proto.PullMessagesRequest) { - if ma.Topic != nil { + if ma.GetTopic() != nil { topicSent.Done() } else { messagesAcked.Done() } - if ma.AckError != nil { + if ma.GetAckError() != nil { totalAckErrors.Add(1) } }, diff --git a/pkg/components/pubsub/registry_test.go b/pkg/components/pubsub/registry_test.go index a8dce903ad0..28d2c70cace 100644 --- a/pkg/components/pubsub/registry_test.go +++ b/pkg/components/pubsub/registry_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/dapr/components-contrib/pubsub" daprt "github.com/dapr/dapr/pkg/testing" @@ -67,21 +68,21 @@ func 
TestCreatePubSub(t *testing.T) { // assert v0 and v1 p, e := testRegistry.Create(componentName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockPubSub, p) p, e = testRegistry.Create(componentName, "v1", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockPubSub, p) // assert v2 pV2, e := testRegistry.Create(componentName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockPubSubV2, pV2) // check case-insensitivity pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockPubSubV2, pV2) }) diff --git a/pkg/components/secretstores/pluggable.go b/pkg/components/secretstores/pluggable.go index fb1624af5e9..7a9c7dbd2ba 100644 --- a/pkg/components/secretstores/pluggable.go +++ b/pkg/components/secretstores/pluggable.go @@ -53,8 +53,8 @@ func (gss *grpcSecretStore) Init(ctx context.Context, metadata secretstores.Meta return err } - gss.features = make([]secretstores.Feature, len(featureResponse.Features)) - for idx, f := range featureResponse.Features { + gss.features = make([]secretstores.Feature, len(featureResponse.GetFeatures())) + for idx, f := range featureResponse.GetFeatures() { gss.features[idx] = secretstores.Feature(f) } @@ -76,7 +76,7 @@ func (gss *grpcSecretStore) GetSecret(ctx context.Context, req secretstores.GetS return secretstores.GetSecretResponse{}, err } return secretstores.GetSecretResponse{ - Data: resp.Data, + Data: resp.GetData(), }, nil } diff --git a/pkg/components/secretstores/pluggable_test.go b/pkg/components/secretstores/pluggable_test.go index 0233d8b3309..d285162e66d 100644 --- a/pkg/components/secretstores/pluggable_test.go +++ b/pkg/components/secretstores/pluggable_test.go @@ -153,7 +153,7 @@ func TestComponentCalls(t *testing.T) { errStr := "secret not found" svc := &server{ onGetSecret: func(req *proto.GetSecretRequest) { - assert.Equal(t, key, req.Key) + assert.Equal(t, key, req.GetKey()) }, getSecretErr: errors.New(errStr), } @@ -212,7 +212,7 @@ func TestComponentCalls(t *testing.T) { err = gSecretStores.Ping() - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.pingCalled.Load()) }) } diff --git a/pkg/components/secretstores/registry_test.go b/pkg/components/secretstores/registry_test.go index 52fbfd6b499..5f4d8af0218 100644 --- a/pkg/components/secretstores/registry_test.go +++ b/pkg/components/secretstores/registry_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ss "github.com/dapr/components-contrib/secretstores" "github.com/dapr/dapr/pkg/components/secretstores" @@ -53,20 +54,20 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 p, e := testRegistry.Create(componentName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) p, e = testRegistry.Create(componentName, "v1", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) // assert v2 pV2, e := testRegistry.Create(componentName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) // check case-insensitivity pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) }) diff --git a/pkg/components/state/pluggable.go b/pkg/components/state/pluggable.go index 4b69d0ffdfb..1fc17f41d07 100644 --- a/pkg/components/state/pluggable.go +++ b/pkg/components/state/pluggable.go @@ -172,8 +172,8 @@ 
func (ss *grpcStateStore) Init(ctx context.Context, metadata state.Metadata) err return err } - ss.features = make([]state.Feature, len(featureResponse.Features)) - for idx, f := range featureResponse.Features { + ss.features = make([]state.Feature, len(featureResponse.GetFeatures())) + for idx, f := range featureResponse.GetFeatures() { ss.features[idx] = state.Feature(f) } @@ -254,15 +254,15 @@ func (ss *grpcStateStore) BulkGet(ctx context.Context, req []state.GetRequest, o return nil, err } - items := make([]state.BulkGetResponse, len(bulkGetResponse.Items)) - for idx, resp := range bulkGetResponse.Items { + items := make([]state.BulkGetResponse, len(bulkGetResponse.GetItems())) + for idx, resp := range bulkGetResponse.GetItems() { items[idx] = state.BulkGetResponse{ Key: resp.GetKey(), Data: resp.GetData(), ETag: fromETagResponse(resp.GetEtag()), Metadata: resp.GetMetadata(), - Error: resp.Error, - ContentType: strNilIfEmpty(resp.ContentType), + Error: resp.GetError(), + ContentType: strNilIfEmpty(resp.GetContentType()), } } return items, nil @@ -370,23 +370,23 @@ func toQuery(req query.Query) (*proto.Query, error) { } func fromQueryResponse(resp *proto.QueryResponse) *state.QueryResponse { - results := make([]state.QueryItem, len(resp.Items)) + results := make([]state.QueryItem, len(resp.GetItems())) - for idx, item := range resp.Items { + for idx, item := range resp.GetItems() { itemIdx := state.QueryItem{ - Key: item.Key, - Data: item.Data, - ETag: fromETagResponse(item.Etag), - Error: item.Error, - ContentType: strNilIfEmpty(item.ContentType), + Key: item.GetKey(), + Data: item.GetData(), + ETag: fromETagResponse(item.GetEtag()), + Error: item.GetError(), + ContentType: strNilIfEmpty(item.GetContentType()), } results[idx] = itemIdx } return &state.QueryResponse{ Results: results, - Token: resp.Token, - Metadata: resp.Metadata, + Token: resp.GetToken(), + Metadata: resp.GetMetadata(), } } @@ -445,7 +445,7 @@ func fromGetResponse(resp *proto.GetResponse) *state.GetResponse { Data: resp.GetData(), ETag: fromETagResponse(resp.GetEtag()), Metadata: resp.GetMetadata(), - ContentType: strNilIfEmpty(resp.ContentType), + ContentType: strNilIfEmpty(resp.GetContentType()), } } diff --git a/pkg/components/state/pluggable_test.go b/pkg/components/state/pluggable_test.go index ef38a169453..fc813d2f52b 100644 --- a/pkg/components/state/pluggable_test.go +++ b/pkg/components/state/pluggable_test.go @@ -227,7 +227,7 @@ func TestComponentCalls(t *testing.T) { assert.Empty(t, stStore.Features()) stStore.features = []state.Feature{state.FeatureETag} assert.NotEmpty(t, stStore.Features()) - assert.Equal(t, stStore.Features()[0], state.FeatureETag) + assert.Equal(t, state.FeatureETag, stStore.Features()[0]) }) t.Run("delete should call delete grpc method", func(t *testing.T) { @@ -235,7 +235,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onDeleteCalled: func(req *proto.DeleteRequest) { - assert.Equal(t, req.Key, fakeKey) + assert.Equal(t, fakeKey, req.GetKey()) }, } stStore, cleanup, err := getStateStore(svc) @@ -255,7 +255,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onDeleteCalled: func(req *proto.DeleteRequest) { - assert.Equal(t, req.Key, fakeKey) + assert.Equal(t, fakeKey, req.GetKey()) }, deleteErr: fakeErr, } @@ -266,7 +266,7 @@ func TestComponentCalls(t *testing.T) { Key: fakeKey, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.deleteCalled.Load()) }) @@ -279,13 +279,13 @@ func TestComponentCalls(t *testing.T) { Description: desc, } 
br := &errdetails.BadRequest{} - br.FieldViolations = append(br.FieldViolations, v) + br.FieldViolations = append(br.GetFieldViolations(), v) st, err := st.WithDetails(br) require.NoError(t, err) svc := &server{ onDeleteCalled: func(req *proto.DeleteRequest) { - assert.Equal(t, req.Key, fakeKey) + assert.Equal(t, fakeKey, req.GetKey()) }, deleteErr: st.Err(), } @@ -296,10 +296,10 @@ func TestComponentCalls(t *testing.T) { Key: fakeKey, }) - assert.NotNil(t, err) + require.Error(t, err) etag, ok := err.(*state.ETagError) require.True(t, ok) - assert.Equal(t, etag.Kind(), state.ETagMismatch) + assert.Equal(t, state.ETagMismatch, etag.Kind()) assert.Equal(t, int64(1), svc.deleteCalled.Load()) }) @@ -312,13 +312,13 @@ func TestComponentCalls(t *testing.T) { Description: desc, } br := &errdetails.BadRequest{} - br.FieldViolations = append(br.FieldViolations, v) + br.FieldViolations = append(br.GetFieldViolations(), v) st, err := st.WithDetails(br) require.NoError(t, err) svc := &server{ onDeleteCalled: func(req *proto.DeleteRequest) { - assert.Equal(t, req.Key, fakeKey) + assert.Equal(t, fakeKey, req.GetKey()) }, deleteErr: st.Err(), } @@ -329,10 +329,10 @@ func TestComponentCalls(t *testing.T) { Key: fakeKey, }) - assert.NotNil(t, err) + require.Error(t, err) etag, ok := err.(*state.ETagError) require.True(t, ok) - assert.Equal(t, etag.Kind(), state.ETagInvalid) + assert.Equal(t, state.ETagInvalid, etag.Kind()) assert.Equal(t, int64(1), svc.deleteCalled.Load()) }) @@ -341,7 +341,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onGetCalled: func(req *proto.GetRequest) { - assert.Equal(t, req.Key, fakeKey) + assert.Equal(t, fakeKey, req.GetKey()) }, getErr: errors.New("my-fake-err"), } @@ -353,7 +353,7 @@ func TestComponentCalls(t *testing.T) { Key: fakeKey, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.getCalled.Load()) assert.Nil(t, resp) }) @@ -363,7 +363,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onGetCalled: func(req *proto.GetRequest) { - assert.Equal(t, req.Key, fakeKey) + assert.Equal(t, fakeKey, req.GetKey()) }, } stStore, cleanup, err := getStateStore(svc) @@ -374,7 +374,7 @@ func TestComponentCalls(t *testing.T) { Key: fakeKey, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.getCalled.Load()) assert.Nil(t, resp) }) @@ -385,7 +385,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onGetCalled: func(req *proto.GetRequest) { - assert.Equal(t, req.Key, fakeKey) + assert.Equal(t, fakeKey, req.GetKey()) }, getResponse: &proto.GetResponse{ Data: fakeData, @@ -409,8 +409,8 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onSetCalled: func(req *proto.SetRequest) { - assert.Equal(t, req.Key, fakeKey) - assert.Equal(t, req.Value, []byte(wrapString(fakeData))) + assert.Equal(t, fakeKey, req.GetKey()) + assert.Equal(t, []byte(wrapString(fakeData)), req.GetValue()) }, setErr: errors.New("fake-set-err"), } @@ -423,7 +423,7 @@ func TestComponentCalls(t *testing.T) { Value: fakeData, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.setCalled.Load()) }) @@ -432,8 +432,8 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onSetCalled: func(req *proto.SetRequest) { - assert.Equal(t, req.Key, fakeKey) - assert.Equal(t, req.Value, []byte(wrapString(fakeData))) + assert.Equal(t, fakeKey, req.GetKey()) + assert.Equal(t, []byte(wrapString(fakeData)), req.GetValue()) }, } stStore, cleanup, err := getStateStore(svc) @@ -471,7 +471,7 @@ func 
TestComponentCalls(t *testing.T) { err = stStore.Ping() - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.pingCalled.Load()) }) @@ -485,7 +485,7 @@ func TestComponentCalls(t *testing.T) { err = stStore.BulkSet(context.Background(), []state.SetRequest{}, state.BulkStoreOpts{}) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.bulkSetCalled.Load()) }) @@ -506,7 +506,7 @@ func TestComponentCalls(t *testing.T) { err = stStore.BulkSet(context.Background(), requests, state.BulkStoreOpts{}) - assert.ErrorIs(t, ErrNilSetValue, err) + require.ErrorIs(t, ErrNilSetValue, err) assert.Equal(t, int64(0), svc.bulkSetCalled.Load()) }) @@ -524,7 +524,7 @@ func TestComponentCalls(t *testing.T) { } svc := &server{ onBulkSetCalled: func(bsr *proto.BulkSetRequest) { - assert.Len(t, bsr.Items, len(requests)) + assert.Len(t, bsr.GetItems(), len(requests)) }, } stStore, cleanup, err := getStateStore(svc) @@ -549,7 +549,7 @@ func TestComponentCalls(t *testing.T) { } svc := &server{ onBulkDeleteCalled: func(bsr *proto.BulkDeleteRequest) { - assert.Len(t, bsr.Items, len(requests)) + assert.Len(t, bsr.GetItems(), len(requests)) }, } stStore, cleanup, err := getStateStore(svc) @@ -571,7 +571,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ bulkDeleteErr: errors.New("fake-bulk-delete-err"), onBulkDeleteCalled: func(bsr *proto.BulkDeleteRequest) { - assert.Len(t, bsr.Items, len(requests)) + assert.Len(t, bsr.GetItems(), len(requests)) }, } stStore, cleanup, err := getStateStore(svc) @@ -580,7 +580,7 @@ func TestComponentCalls(t *testing.T) { err = stStore.BulkDelete(context.Background(), requests, state.BulkStoreOpts{}) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.bulkDeleteCalled.Load()) }) @@ -603,7 +603,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ bulkDeleteErr: st.Err(), onBulkDeleteCalled: func(bsr *proto.BulkDeleteRequest) { - assert.Len(t, bsr.Items, len(requests)) + assert.Len(t, bsr.GetItems(), len(requests)) }, } stStore, cleanup, err := getStateStore(svc) @@ -612,7 +612,7 @@ func TestComponentCalls(t *testing.T) { err = stStore.BulkDelete(context.Background(), requests, state.BulkStoreOpts{}) - assert.NotNil(t, err) + require.Error(t, err) _, ok := err.(*state.BulkDeleteRowMismatchError) require.True(t, ok) assert.Equal(t, int64(1), svc.bulkDeleteCalled.Load()) @@ -633,7 +633,7 @@ func TestComponentCalls(t *testing.T) { resp, err := stStore.BulkGet(context.Background(), requests, state.BulkGetOpts{}) - assert.NotNil(t, err) + require.Error(t, err) assert.Nil(t, resp) assert.Equal(t, int64(1), svc.bulkGetCalled.Load()) }) @@ -654,7 +654,7 @@ func TestComponentCalls(t *testing.T) { svc := &server{ onBulkGetCalled: func(bsr *proto.BulkGetRequest) { - assert.Len(t, bsr.Items, len(requests)) + assert.Len(t, bsr.GetItems(), len(requests)) }, bulkGetResponse: &proto.BulkGetResponse{ Items: respItems, @@ -685,7 +685,7 @@ func TestComponentCalls(t *testing.T) { Metadata: map[string]string{}, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, int64(1), svc.transactCalled.Load()) }) @@ -703,7 +703,7 @@ func TestComponentCalls(t *testing.T) { } svc := &server{ onTransactCalled: func(bsr *proto.TransactionalStateRequest) { - assert.Len(t, bsr.Operations, len(operations)) + assert.Len(t, bsr.GetOperations(), len(operations)) }, } stStore, cleanup, err := getStateStore(svc) @@ -731,7 +731,7 @@ func TestComponentCalls(t *testing.T) { resp, err := stStore.Query(context.Background(), 
&state.QueryRequest{}) - assert.NotNil(t, err) + require.Error(t, err) assert.Nil(t, resp) assert.Equal(t, int64(1), svc.queryCalled.Load()) }) @@ -759,7 +759,7 @@ func TestComponentCalls(t *testing.T) { } svc := &server{ onQueryCalled: func(bsr *proto.QueryRequest) { - assert.Len(t, bsr.Query.Filter, len(filters)) + assert.Len(t, bsr.GetQuery().GetFilter(), len(filters)) }, queryResp: &proto.QueryResponse{ Items: results, @@ -813,9 +813,9 @@ func TestMappers(t *testing.T) { Consistency: state.Eventual, }, }) - assert.Equal(t, getRequest.Key, fakeKey) - assert.Equal(t, getRequest.Metadata[fakeKey], fakeKey) - assert.Equal(t, getRequest.Consistency, proto.StateOptions_CONSISTENCY_EVENTUAL) + assert.Equal(t, fakeKey, getRequest.GetKey()) + assert.Equal(t, fakeKey, getRequest.GetMetadata()[fakeKey]) + assert.Equal(t, proto.StateOptions_CONSISTENCY_EVENTUAL, getRequest.GetConsistency()) }) t.Run("fromGetResponse should map all properties from the given response", func(t *testing.T) { @@ -843,7 +843,7 @@ func TestMappers(t *testing.T) { fakeETag := "this" etagRequest := toETagRequest(&fakeETag) assert.NotNil(t, etagRequest) - assert.Equal(t, etagRequest.Value, fakeETag) + assert.Equal(t, etagRequest.GetValue(), fakeETag) }) t.Run("fromETagResponse should return nil when receiving a nil etag response", func(t *testing.T) { @@ -880,14 +880,14 @@ func TestMappers(t *testing.T) { }) require.NoError(t, err) assert.NotNil(t, req) - assert.Equal(t, req.Key, fakeKey) - assert.NotNil(t, req.Value) + assert.Equal(t, fakeKey, req.GetKey()) + assert.NotNil(t, req.GetValue()) if v, ok := fakeValue.(string); ok { - assert.Equal(t, string(req.Value), wrapString(v)) + assert.Equal(t, string(req.GetValue()), wrapString(v)) } - assert.Equal(t, req.Metadata[fakeKey], fakePropValue) - assert.Equal(t, req.Options.Concurrency, proto.StateOptions_CONCURRENCY_LAST_WRITE) - assert.Equal(t, req.Options.Consistency, proto.StateOptions_CONSISTENCY_EVENTUAL) + assert.Equal(t, fakePropValue, req.GetMetadata()[fakeKey]) + assert.Equal(t, proto.StateOptions_CONCURRENCY_LAST_WRITE, req.GetOptions().GetConcurrency()) + assert.Equal(t, proto.StateOptions_CONSISTENCY_EVENTUAL, req.GetOptions().GetConsistency()) } }) @@ -909,17 +909,17 @@ func TestMappers(t *testing.T) { }) require.NoError(t, err) assert.NotNil(t, req) - assert.Equal(t, req.Key, fakeKey) - assert.NotNil(t, req.Value) - assert.Equal(t, req.Metadata[fakeKey], fakePropValue) - assert.Equal(t, req.Options.Concurrency, proto.StateOptions_CONCURRENCY_LAST_WRITE) - assert.Equal(t, req.Options.Consistency, proto.StateOptions_CONSISTENCY_EVENTUAL) + assert.Equal(t, fakeKey, req.GetKey()) + assert.NotNil(t, req.GetValue()) + assert.Equal(t, fakePropValue, req.GetMetadata()[fakeKey]) + assert.Equal(t, proto.StateOptions_CONCURRENCY_LAST_WRITE, req.GetOptions().GetConcurrency()) + assert.Equal(t, proto.StateOptions_CONSISTENCY_EVENTUAL, req.GetOptions().GetConsistency()) } t.Run("toTransact should return err when type is unrecognized", func(t *testing.T) { req, err := toTransactOperation(failingTransactOperation{}) assert.Nil(t, req) - assert.ErrorIs(t, err, ErrTransactOperationNotSupported) + require.ErrorIs(t, err, ErrTransactOperationNotSupported) }) t.Run("toTransact should return set operation when type is SetOperation", func(t *testing.T) { @@ -930,7 +930,7 @@ func TestMappers(t *testing.T) { }) require.NoError(t, err) assert.NotNil(t, req) - assert.IsType(t, &proto.TransactionalStateOperation_Set{}, req.Request) + assert.IsType(t, 
&proto.TransactionalStateOperation_Set{}, req.GetRequest()) }) t.Run("toTransact should return delete operation when type is SetOperation", func(t *testing.T) { @@ -939,7 +939,7 @@ func TestMappers(t *testing.T) { }) require.NoError(t, err) assert.NotNil(t, req) - assert.IsType(t, &proto.TransactionalStateOperation_Delete{}, req.Request) + assert.IsType(t, &proto.TransactionalStateOperation_Delete{}, req.GetRequest()) }) }) } diff --git a/pkg/components/state/registry_test.go b/pkg/components/state/registry_test.go index e08a20a5849..d7dbbd9150a 100644 --- a/pkg/components/state/registry_test.go +++ b/pkg/components/state/registry_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" s "github.com/dapr/components-contrib/state" "github.com/dapr/dapr/pkg/components" @@ -73,44 +74,44 @@ func TestRegistry(t *testing.T) { // assert v0 and v1 p, e := testRegistry.Create(componentName, "v0", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) p, e = testRegistry.Create(componentName, "v1", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mock, p) // assert v2 pV2, e := testRegistry.Create(componentName, "v2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) // check case-insensitivity pV2, e = testRegistry.Create(strings.ToUpper(componentName), "V2", "") - assert.NoError(t, e) + require.NoError(t, e) assert.Same(t, mockV2, pV2) // Check availability of foo versions p, err := testRegistry.Create("state.foo", "v1", "") - assert.NoError(t, err) + require.NoError(t, err) assert.Same(t, fooV1, p) p, err = testRegistry.Create("state.foo", "v2", "") - assert.NoError(t, err) + require.NoError(t, err) assert.Same(t, fooV2, p) p, err = testRegistry.Create("state.foo", "v3", "") - assert.NoError(t, err) + require.NoError(t, err) assert.Same(t, fooV3, p) p, err = testRegistry.Create("state.foo", "v4", "") - assert.NoError(t, err) + require.NoError(t, err) assert.Same(t, fooV4, p) p, err = testRegistry.Create("state.foo", "v5", "") - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, p) p, err = testRegistry.Create("state.foo", "", "") - assert.NoError(t, err) + require.NoError(t, err) assert.Same(t, fooV1, p) p, err = testRegistry.Create("state.foo", "v0", "") - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, p) }) diff --git a/pkg/components/state/state_config_test.go b/pkg/components/state/state_config_test.go index 04b5c73f382..c483ae167ce 100644 --- a/pkg/components/state/state_config_test.go +++ b/pkg/components/state/state_config_test.go @@ -59,9 +59,9 @@ func TestGetModifiedStateKey(t *testing.T) { err := SaveStateConfiguration(item.storename, map[string]string{ strategyKey: item.prefix, }) - require.Nil(t, err) + require.NoError(t, err) _, err = GetModifiedStateKey(item.key, item.storename, "") - require.NotNil(t, err) + require.Error(t, err) } } @@ -173,14 +173,14 @@ func TestStateConfigRace(t *testing.T) { defer wg.Done() for i := 0; i < iterations; i++ { err := SaveStateConfiguration(fmt.Sprintf("store%d", i), map[string]string{strategyKey: strategyNone}) - require.Nil(t, err) + require.NoError(t, err) } }() go func() { defer wg.Done() for i := 0; i < iterations; i++ { _, err := GetModifiedStateKey(key, fmt.Sprintf("store%d", i), "appid") - require.Nil(t, err) + require.NoError(t, err) } }() wg.Wait() @@ -193,14 +193,14 @@ func TestStateConfigRace(t *testing.T) { defer wg.Done() for i := 0; i < iterations; i++ { _, err := 
GetModifiedStateKey(key, fmt.Sprintf("store%d", i), "appid") - require.Nil(t, err) + require.NoError(t, err) } }() go func() { defer wg.Done() for i := 0; i < iterations; i++ { _, err := GetModifiedStateKey(key, fmt.Sprintf("store%d", i), "appid") - require.Nil(t, err) + require.NoError(t, err) } }() wg.Wait() diff --git a/pkg/config/configuration.go b/pkg/config/configuration.go index 7eb6d32e292..8e5a8d0d7fd 100644 --- a/pkg/config/configuration.go +++ b/pkg/config/configuration.go @@ -101,21 +101,21 @@ type AccessControlListOperationAction struct { } type ConfigurationSpec struct { - HTTPPipelineSpec *PipelineSpec `json:"httpPipeline,omitempty" yaml:"httpPipeline,omitempty"` + HTTPPipelineSpec *PipelineSpec `json:"httpPipeline,omitempty" yaml:"httpPipeline,omitempty"` AppHTTPPipelineSpec *PipelineSpec `json:"appHttpPipeline,omitempty" yaml:"appHttpPipeline,omitempty"` - TracingSpec *TracingSpec `json:"tracing,omitempty" yaml:"tracing,omitempty"` - MTLSSpec *MTLSSpec `json:"mtls,omitempty" yaml:"mtls,omitempty"` - MetricSpec *MetricSpec `json:"metric,omitempty" yaml:"metric,omitempty"` - MetricsSpec *MetricSpec `json:"metrics,omitempty" yaml:"metrics,omitempty"` - Secrets *SecretsSpec `json:"secrets,omitempty" yaml:"secrets,omitempty"` - AccessControlSpec *AccessControlSpec `json:"accessControl,omitempty" yaml:"accessControl,omitempty"` - NameResolutionSpec *NameResolutionSpec `json:"nameResolution,omitempty" yaml:"nameResolution,omitempty"` - Features []FeatureSpec `json:"features,omitempty" yaml:"features,omitempty"` - APISpec *APISpec `json:"api,omitempty" yaml:"api,omitempty"` - ComponentsSpec *ComponentsSpec `json:"components,omitempty" yaml:"components,omitempty"` - LoggingSpec *LoggingSpec `json:"logging,omitempty" yaml:"logging,omitempty"` - WasmSpec *WasmSpec `json:"wasm,omitempty" yaml:"wasm,omitempty"` - WorkflowSpec *WorkflowSpec `json:"workflow,omitempty" yaml:"workflow,omitempty"` + TracingSpec *TracingSpec `json:"tracing,omitempty" yaml:"tracing,omitempty"` + MTLSSpec *MTLSSpec `json:"mtls,omitempty" yaml:"mtls,omitempty"` + MetricSpec *MetricSpec `json:"metric,omitempty" yaml:"metric,omitempty"` + MetricsSpec *MetricSpec `json:"metrics,omitempty" yaml:"metrics,omitempty"` + Secrets *SecretsSpec `json:"secrets,omitempty" yaml:"secrets,omitempty"` + AccessControlSpec *AccessControlSpec `json:"accessControl,omitempty" yaml:"accessControl,omitempty"` + NameResolutionSpec *NameResolutionSpec `json:"nameResolution,omitempty" yaml:"nameResolution,omitempty"` + Features []FeatureSpec `json:"features,omitempty" yaml:"features,omitempty"` + APISpec *APISpec `json:"api,omitempty" yaml:"api,omitempty"` + ComponentsSpec *ComponentsSpec `json:"components,omitempty" yaml:"components,omitempty"` + LoggingSpec *LoggingSpec `json:"logging,omitempty" yaml:"logging,omitempty"` + WasmSpec *WasmSpec `json:"wasm,omitempty" yaml:"wasm,omitempty"` + WorkflowSpec *WorkflowSpec `json:"workflow,omitempty" yaml:"workflow,omitempty"` } // WorkflowSpec defines the configuration for Dapr workflows. @@ -150,10 +150,10 @@ type SecretsSpec struct { // SecretsScope defines the scope for secrets. 
type SecretsScope struct {
- DefaultAccess string `json:"defaultAccess,omitempty" yaml:"defaultAccess,omitempty"`
- StoreName string `json:"storeName,omitempty" yaml:"storeName,omitempty"`
+ DefaultAccess string `json:"defaultAccess,omitempty" yaml:"defaultAccess,omitempty"`
+ StoreName string `json:"storeName,omitempty" yaml:"storeName,omitempty"`
 AllowedSecrets []string `json:"allowedSecrets,omitempty" yaml:"allowedSecrets,omitempty"`
- DeniedSecrets []string `json:"deniedSecrets,omitempty" yaml:"deniedSecrets,omitempty"`
+ DeniedSecrets []string `json:"deniedSecrets,omitempty" yaml:"deniedSecrets,omitempty"`
 }
 type PipelineSpec struct {
@@ -201,9 +201,9 @@ func (r APIAccessRules) GetRulesByProtocol(protocol APIAccessRuleProtocol) map[s
 }
 type HandlerSpec struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- Version string `json:"version,omitempty" yaml:"version,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Type string `json:"type,omitempty" yaml:"type,omitempty"`
+ Version string `json:"version,omitempty" yaml:"version,omitempty"`
 SelectorSpec SelectorSpec `json:"selector,omitempty" yaml:"selector,omitempty"`
 }
@@ -223,9 +223,9 @@ type SelectorField struct {
 type TracingSpec struct {
 SamplingRate string `json:"samplingRate,omitempty" yaml:"samplingRate,omitempty"`
- Stdout bool `json:"stdout,omitempty" yaml:"stdout,omitempty"`
- Zipkin *ZipkinSpec `json:"zipkin,omitempty" yaml:"zipkin,omitempty"`
- Otel *OtelSpec `json:"otel,omitempty" yaml:"otel,omitempty"`
+ Stdout bool `json:"stdout,omitempty" yaml:"stdout,omitempty"`
+ Zipkin *ZipkinSpec `json:"zipkin,omitempty" yaml:"zipkin,omitempty"`
+ Otel *OtelSpec `json:"otel,omitempty" yaml:"otel,omitempty"`
 }
 // ZipkinSpec defines Zipkin exporter configurations.
@@ -235,7 +235,7 @@ type ZipkinSpec struct {
 // OtelSpec defines Otel exporter configurations.
 type OtelSpec struct {
- Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"`
+ Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"`
 EndpointAddress string `json:"endpointAddress,omitempty" yaml:"endpointAddress,omitempty"`
 // Defaults to true
 IsSecure *bool `json:"isSecure,omitempty" yaml:"isSecure,omitempty"`
@@ -251,7 +251,7 @@ func (o OtelSpec) GetIsSecure() bool {
 type MetricSpec struct {
 // Defaults to true
 Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"`
- Rules []MetricsRule `json:"rules,omitempty" yaml:"rules,omitempty"`
+ Rules []MetricsRule `json:"rules,omitempty" yaml:"rules,omitempty"`
 }
 // GetEnabled returns true if metrics are enabled.
@@ -260,53 +260,53 @@ func (m MetricSpec) GetEnabled() bool {
 return m.Enabled == nil || *m.Enabled
 }
-// MetricsRule defines configuration options for a metric.
+// MetricsRule defines configuration options for a metric.
 type MetricsRule struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
 Labels []MetricLabel `json:"labels,omitempty" yaml:"labels,omitempty"`
 }
 // MetricsLabel defines an object that allows to set regex expressions for a label.
 type MetricLabel struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
 Regex map[string]string `json:"regex,omitempty" yaml:"regex,omitempty"`
 }
 // AppPolicySpec defines the policy data structure for each app. 
type AppPolicySpec struct { - AppName string `json:"appId,omitempty" yaml:"appId,omitempty"` + AppName string `json:"appId,omitempty" yaml:"appId,omitempty"` DefaultAction string `json:"defaultAction,omitempty" yaml:"defaultAction,omitempty"` - TrustDomain string `json:"trustDomain,omitempty" yaml:"trustDomain,omitempty"` - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` - AppOperationActions []AppOperation `json:"operations,omitempty" yaml:"operations,omitempty"` + TrustDomain string `json:"trustDomain,omitempty" yaml:"trustDomain,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + AppOperationActions []AppOperation `json:"operations,omitempty" yaml:"operations,omitempty"` } // AppOperation defines the data structure for each app operation. type AppOperation struct { - Operation string `json:"name,omitempty" yaml:"name,omitempty"` + Operation string `json:"name,omitempty" yaml:"name,omitempty"` HTTPVerb []string `json:"httpVerb,omitempty" yaml:"httpVerb,omitempty"` - Action string `json:"action,omitempty" yaml:"action,omitempty"` + Action string `json:"action,omitempty" yaml:"action,omitempty"` } // AccessControlSpec is the spec object in ConfigurationSpec. type AccessControlSpec struct { DefaultAction string `json:"defaultAction,omitempty" yaml:"defaultAction,omitempty"` - TrustDomain string `json:"trustDomain,omitempty" yaml:"trustDomain,omitempty"` - AppPolicies []AppPolicySpec `json:"policies,omitempty" yaml:"policies,omitempty"` + TrustDomain string `json:"trustDomain,omitempty" yaml:"trustDomain,omitempty"` + AppPolicies []AppPolicySpec `json:"policies,omitempty" yaml:"policies,omitempty"` } type NameResolutionSpec struct { - Component string `json:"component,omitempty" yaml:"component,omitempty"` - Version string `json:"version,omitempty" yaml:"version,omitempty"` + Component string `json:"component,omitempty" yaml:"component,omitempty"` + Version string `json:"version,omitempty" yaml:"version,omitempty"` Configuration any `json:"configuration,omitempty" yaml:"configuration,omitempty"` } // MTLSSpec defines mTLS configuration. type MTLSSpec struct { - Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` - WorkloadCertTTL string `json:"workloadCertTTL,omitempty" yaml:"workloadCertTTL,omitempty"` - AllowedClockSkew string `json:"allowedClockSkew,omitempty" yaml:"allowedClockSkew,omitempty"` - SentryAddress string `json:"sentryAddress,omitempty" yaml:"sentryAddress,omitempty"` + Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` + WorkloadCertTTL string `json:"workloadCertTTL,omitempty" yaml:"workloadCertTTL,omitempty"` + AllowedClockSkew string `json:"allowedClockSkew,omitempty" yaml:"allowedClockSkew,omitempty"` + SentryAddress string `json:"sentryAddress,omitempty" yaml:"sentryAddress,omitempty"` ControlPlaneTrustDomain string `json:"controlPlaneTrustDomain,omitempty" yaml:"controlPlaneTrustDomain,omitempty"` // Additional token validators to use. // When Dapr is running in Kubernetes mode, this is in addition to the built-in "kubernetes" validator. @@ -334,7 +334,7 @@ func (v ValidatorSpec) OptionsMap() map[string]string { // FeatureSpec defines which preview features are enabled. 
type FeatureSpec struct { - Name Feature `json:"name" yaml:"name"` + Name Feature `json:"name" yaml:"name"` Enabled bool `json:"enabled" yaml:"enabled"` } diff --git a/pkg/config/configuration_test.go b/pkg/config/configuration_test.go index 01ddb29cf9b..27b5afdf03f 100644 --- a/pkg/config/configuration_test.go +++ b/pkg/config/configuration_test.go @@ -55,10 +55,10 @@ func TestLoadStandaloneConfiguration(t *testing.T) { t.Run(tc.name, func(t *testing.T) { config, err := LoadStandaloneConfiguration(tc.path) if tc.errorExpected { - assert.Error(t, err, "Expected an error") + require.Error(t, err, "Expected an error") assert.Nil(t, config, "Config should not be loaded") } else { - assert.NoError(t, err, "Unexpected error") + require.NoError(t, err, "Unexpected error") assert.NotNil(t, config, "Config not loaded as expected") } }) @@ -67,14 +67,14 @@ func TestLoadStandaloneConfiguration(t *testing.T) { t.Run("parse environment variables", func(t *testing.T) { t.Setenv("DAPR_SECRET", "keepitsecret") config, err := LoadStandaloneConfiguration("./testdata/env_variables_config.yaml") - assert.NoError(t, err, "Unexpected error") + require.NoError(t, err, "Unexpected error") assert.NotNil(t, config, "Config not loaded as expected") assert.Equal(t, "keepitsecret", config.Spec.Secrets.Scopes[0].AllowedSecrets[0]) }) t.Run("check Kind and Name", func(t *testing.T) { config, err := LoadStandaloneConfiguration("./testdata/config.yaml") - assert.NoError(t, err, "Unexpected error") + require.NoError(t, err, "Unexpected error") assert.NotNil(t, config, "Config not loaded as expected") assert.Equal(t, "secretappconfig", config.ObjectMeta.Name) assert.Equal(t, "Configuration", config.TypeMeta.Kind) @@ -101,7 +101,7 @@ func TestLoadStandaloneConfiguration(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { config, err := LoadStandaloneConfiguration(tc.confFile) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, tc.metricEnabled, config.Spec.MetricSpec.GetEnabled()) }) } @@ -123,7 +123,7 @@ func TestLoadStandaloneConfiguration(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { config, err := LoadStandaloneConfiguration(tc.confFile) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, reflect.DeepEqual(tc.componentsDeny, config.Spec.ComponentsSpec.Deny)) }) } @@ -344,7 +344,7 @@ func TestSortAndValidateSecretsConfigration(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.config.sortAndValidateSecretsConfiguration() if tc.errorExpected { - assert.Error(t, err, "expected validation to fail") + require.Error(t, err, "expected validation to fail") } else if tc.config.Spec.Secrets != nil { for _, scope := range tc.config.Spec.Secrets.Scopes { assert.True(t, sort.StringsAreSorted(scope.AllowedSecrets), "expected sorted slice") @@ -431,7 +431,7 @@ func TestIsSecretAllowed(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.scope.IsSecretAllowed(tc.secretKey), tc.expectedResult, "incorrect access") + assert.Equal(t, tc.expectedResult, tc.scope.IsSecretAllowed(tc.secretKey), "incorrect access") }) } } diff --git a/pkg/diagnostics/component_monitoring_test.go b/pkg/diagnostics/component_monitoring_test.go index 796be5613ce..eeeec6e613e 100644 --- a/pkg/diagnostics/component_monitoring_test.go +++ b/pkg/diagnostics/component_monitoring_test.go @@ -42,7 +42,7 @@ func TestPubSub(t *testing.T) { allTagsPresent(t, v, viewData[0].Tags) - assert.Equal(t, float64(1), 
viewData[0].Data.(*view.DistributionData).Min) + assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0) }) t.Run("record egress latency", func(t *testing.T) { @@ -55,7 +55,7 @@ func TestPubSub(t *testing.T) { allTagsPresent(t, v, viewData[0].Tags) - assert.Equal(t, float64(1), viewData[0].Data.(*view.DistributionData).Min) + assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0) }) } @@ -81,7 +81,7 @@ func TestBindings(t *testing.T) { allTagsPresent(t, v, viewData[0].Tags) - assert.Equal(t, float64(1), viewData[0].Data.(*view.DistributionData).Min) + assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0) }) t.Run("record output binding count", func(t *testing.T) { @@ -105,7 +105,7 @@ func TestBindings(t *testing.T) { allTagsPresent(t, v, viewData[0].Tags) - assert.Equal(t, float64(1), viewData[0].Data.(*view.DistributionData).Min) + assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0) }) } @@ -130,7 +130,7 @@ func TestState(t *testing.T) { v := view.Find("component/state/latencies") allTagsPresent(t, v, viewData[0].Tags) - assert.Equal(t, float64(1), viewData[0].Data.(*view.DistributionData).Min) + assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0) }) } @@ -156,7 +156,7 @@ func TestConfiguration(t *testing.T) { allTagsPresent(t, v, viewData[0].Tags) - assert.Equal(t, float64(1), viewData[0].Data.(*view.DistributionData).Min) + assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0) }) } @@ -182,15 +182,15 @@ func TestSecrets(t *testing.T) { allTagsPresent(t, v, viewData[0].Tags) - assert.Equal(t, float64(1), viewData[0].Data.(*view.DistributionData).Min) + assert.InEpsilon(t, 1, viewData[0].Data.(*view.DistributionData).Min, 0) }) } func TestComponentMetricsInit(t *testing.T) { c := componentsMetrics() assert.True(t, c.enabled) - assert.Equal(t, c.appID, "test") - assert.Equal(t, c.namespace, "default") + assert.Equal(t, "test", c.appID) + assert.Equal(t, "default", c.namespace) } func TestElapsedSince(t *testing.T) { @@ -198,5 +198,5 @@ func TestElapsedSince(t *testing.T) { time.Sleep(time.Second) elapsed := ElapsedSince(start) - assert.True(t, elapsed >= 1000) + assert.GreaterOrEqual(t, elapsed, float64(1000)) } diff --git a/pkg/diagnostics/grpc_monitoring_test.go b/pkg/diagnostics/grpc_monitoring_test.go index 4c9a6a24d07..85eea337334 100644 --- a/pkg/diagnostics/grpc_monitoring_test.go +++ b/pkg/diagnostics/grpc_monitoring_test.go @@ -72,15 +72,15 @@ func TestStreamingServerInterceptor(t *testing.T) { } err := i(nil, s, &grpc.StreamServerInfo{}, f) - assert.NoError(t, err) + require.NoError(t, err) rows, err := view.RetrieveData("grpc.io/server/completed_rpcs") - assert.NoError(t, err) - assert.Equal(t, 0, len(rows)) + require.NoError(t, err) + assert.Empty(t, rows) rowsLatency, err := view.RetrieveData("grpc.io/server/server_latency") - assert.NoError(t, err) - assert.Equal(t, 0, len(rowsLatency)) + require.NoError(t, err) + assert.Empty(t, rowsLatency) }) t.Run("proxy request, run pipeline", func(t *testing.T) { @@ -96,18 +96,18 @@ func TestStreamingServerInterceptor(t *testing.T) { } err := i(nil, s, &grpc.StreamServerInfo{FullMethod: "/appv1.Test"}, f) - assert.NoError(t, err) + require.NoError(t, err) rows, err := view.RetrieveData("grpc.io/server/completed_rpcs") require.NoError(t, err) - require.Equal(t, 1, len(rows)) + require.Len(t, rows, 1) assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name()) assert.Equal(t, "grpc_server_method", rows[0].Tags[1].Key.Name()) 
assert.Equal(t, "grpc_server_status", rows[0].Tags[2].Key.Name()) rows, err = view.RetrieveData("grpc.io/server/server_latency") require.NoError(t, err) - require.Equal(t, 1, len(rows)) + require.Len(t, rows, 1) assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name()) assert.Equal(t, "grpc_server_method", rows[0].Tags[1].Key.Name()) }) @@ -125,15 +125,15 @@ func TestStreamingClientInterceptor(t *testing.T) { } err := i(nil, s, &grpc.StreamServerInfo{}, f) - assert.NoError(t, err) + require.NoError(t, err) rows, err := view.RetrieveData("grpc.io/client/completed_rpcs") - assert.NoError(t, err) - assert.Equal(t, 0, len(rows)) + require.NoError(t, err) + assert.Empty(t, rows) rowsLatency, err := view.RetrieveData("grpc.io/client/roundtrip_latency") - assert.NoError(t, err) - assert.Equal(t, 0, len(rowsLatency)) + require.NoError(t, err) + assert.Empty(t, rowsLatency) }) t.Run("proxy request, run pipeline", func(t *testing.T) { @@ -149,18 +149,18 @@ func TestStreamingClientInterceptor(t *testing.T) { } err := i(nil, s, &grpc.StreamServerInfo{FullMethod: "/appv1.Test"}, f) - assert.NoError(t, err) + require.NoError(t, err) rows, err := view.RetrieveData("grpc.io/client/completed_rpcs") - assert.NoError(t, err) - assert.Equal(t, 1, len(rows)) + require.NoError(t, err) + assert.Len(t, rows, 1) assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name()) assert.Equal(t, "grpc_client_method", rows[0].Tags[1].Key.Name()) assert.Equal(t, "grpc_client_status", rows[0].Tags[2].Key.Name()) rowsLatency, err := view.RetrieveData("grpc.io/client/roundtrip_latency") - assert.NoError(t, err) - assert.Equal(t, 1, len(rowsLatency)) + require.NoError(t, err) + assert.Len(t, rowsLatency, 1) assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name()) assert.Equal(t, "grpc_client_method", rows[0].Tags[1].Key.Name()) assert.Equal(t, "grpc_client_status", rows[0].Tags[2].Key.Name()) diff --git a/pkg/diagnostics/grpc_tracing.go b/pkg/diagnostics/grpc_tracing.go index 8f1d505c8e4..a8c923f0d5e 100644 --- a/pkg/diagnostics/grpc_tracing.go +++ b/pkg/diagnostics/grpc_tracing.go @@ -304,11 +304,11 @@ func spanAttributesMapFromGRPC(appID string, req any, rpcMethod string) map[stri // Rename spanname if s.GetActor() == nil { - m[diagConsts.DaprAPISpanNameInternal] = "CallLocal/" + appID + "/" + s.Message.Method - m[diagConsts.DaprAPIInvokeMethod] = s.Message.Method + m[diagConsts.DaprAPISpanNameInternal] = "CallLocal/" + appID + "/" + s.GetMessage().GetMethod() + m[diagConsts.DaprAPIInvokeMethod] = s.GetMessage().GetMethod() } else { - m[diagConsts.DaprAPISpanNameInternal] = "CallActor/" + s.Actor.ActorType + "/" + s.Message.Method - m[diagConsts.DaprAPIActorTypeID] = s.Actor.ActorType + "." + s.Actor.ActorId + m[diagConsts.DaprAPISpanNameInternal] = "CallActor/" + s.GetActor().GetActorType() + "/" + s.GetMessage().GetMethod() + m[diagConsts.DaprAPIActorTypeID] = s.GetActor().GetActorType() + "." 
+ s.GetActor().GetActorId() } // Dapr APIs diff --git a/pkg/diagnostics/grpc_tracing_test.go b/pkg/diagnostics/grpc_tracing_test.go index dc32a308c39..e83320b064e 100644 --- a/pkg/diagnostics/grpc_tracing_test.go +++ b/pkg/diagnostics/grpc_tracing_test.go @@ -16,12 +16,14 @@ package diagnostics import ( "context" "encoding/base64" + "encoding/hex" "errors" "fmt" "strings" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" otelcodes "go.opentelemetry.io/otel/codes" sdktrace "go.opentelemetry.io/otel/sdk/trace" @@ -81,7 +83,7 @@ func TestUserDefinedMetadata(t *testing.T) { m := userDefinedMetadata(testCtx) - assert.Equal(t, 2, len(m)) + assert.Len(t, m, 2) assert.Equal(t, "value1", m["dapr-userdefined-1"]) assert.Equal(t, "value2", m["dapr-userdefined-2"]) } @@ -132,9 +134,9 @@ func TestGRPCTraceUnaryServerInterceptor(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() - assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", fmt.Sprintf("%x", traceID[:])) + assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:])) spanID := sc.SpanID() - assert.NotEqual(t, "00f067aa0ba902b7", fmt.Sprintf("%x", spanID[:])) + assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:])) }) t.Run("grpc-trace-bin is not given", func(t *testing.T) { @@ -157,8 +159,8 @@ func TestGRPCTraceUnaryServerInterceptor(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() spanID := sc.SpanID() - assert.NotEmpty(t, fmt.Sprintf("%x", traceID[:])) - assert.NotEmpty(t, fmt.Sprintf("%x", spanID[:])) + assert.NotEmpty(t, hex.EncodeToString(traceID[:])) + assert.NotEmpty(t, hex.EncodeToString(spanID[:])) }) t.Run("InvokeService call", func(t *testing.T) { @@ -183,8 +185,8 @@ func TestGRPCTraceUnaryServerInterceptor(t *testing.T) { assert.True(t, strings.Contains(spanString, "CallLocal/targetID/method1")) traceID := sc.TraceID() spanID := sc.SpanID() - assert.NotEmpty(t, fmt.Sprintf("%x", traceID[:])) - assert.NotEmpty(t, fmt.Sprintf("%x", spanID[:])) + assert.NotEmpty(t, hex.EncodeToString(traceID[:])) + assert.NotEmpty(t, hex.EncodeToString(spanID[:])) }) t.Run("InvokeService call with grpc status error", func(t *testing.T) { @@ -227,8 +229,8 @@ func TestGRPCTraceUnaryServerInterceptor(t *testing.T) { assert.True(t, strings.Contains(spanString, "CallLocal/targetID/method1")) traceID := sc.TraceID() spanID := sc.SpanID() - assert.NotEmpty(t, fmt.Sprintf("%x", traceID[:])) - assert.NotEmpty(t, fmt.Sprintf("%x", spanID[:])) + assert.NotEmpty(t, hex.EncodeToString(traceID[:])) + assert.NotEmpty(t, hex.EncodeToString(spanID[:])) }) } @@ -258,7 +260,7 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { } err := interceptor(nil, &fakeStream{}, fakeInfo, h) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("grpc-trace-bin is given", func(t *testing.T) { @@ -279,9 +281,9 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() - assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", fmt.Sprintf("%x", traceID[:])) + assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:])) spanID := sc.SpanID() - assert.NotEqual(t, "00f067aa0ba902b7", fmt.Sprintf("%x", spanID[:])) + assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:])) }) t.Run("grpc-trace-bin is not given", func(t *testing.T) { @@ -300,8 +302,8 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() spanID := 
sc.SpanID() - assert.NotEmpty(t, fmt.Sprintf("%x", traceID[:])) - assert.NotEmpty(t, fmt.Sprintf("%x", spanID[:])) + assert.NotEmpty(t, hex.EncodeToString(traceID[:])) + assert.NotEmpty(t, hex.EncodeToString(spanID[:])) }) }) @@ -316,7 +318,7 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { } err := interceptor(nil, &fakeStream{}, fakeInfo, h) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("grpc-trace-bin is given", func(t *testing.T) { @@ -337,9 +339,9 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() - assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", fmt.Sprintf("%x", traceID[:])) + assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:])) spanID := sc.SpanID() - assert.NotEqual(t, "00f067aa0ba902b7", fmt.Sprintf("%x", spanID[:])) + assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:])) }) t.Run("grpc-trace-bin is not given", func(t *testing.T) { @@ -358,8 +360,8 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() spanID := sc.SpanID() - assert.NotEmpty(t, fmt.Sprintf("%x", traceID[:])) - assert.NotEmpty(t, fmt.Sprintf("%x", spanID[:])) + assert.NotEmpty(t, hex.EncodeToString(traceID[:])) + assert.NotEmpty(t, hex.EncodeToString(spanID[:])) }) }) @@ -370,7 +372,7 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { } err := interceptor(nil, &fakeStream{}, fakeInfo, nil) - assert.Error(t, err) + require.Error(t, err) }) t.Run("proxy request with app id and grpc-trace-bin", func(t *testing.T) { @@ -392,13 +394,13 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { } err := interceptor(nil, &fakeStream{ctx}, fakeInfo, assertHandler) - assert.NoError(t, err) + require.NoError(t, err) sc := span.SpanContext() traceID := sc.TraceID() - assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", fmt.Sprintf("%x", traceID[:])) + assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:])) spanID := sc.SpanID() - assert.NotEqual(t, "00f067aa0ba902b7", fmt.Sprintf("%x", spanID[:])) + assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:])) }) t.Run("proxy request with app id and no grpc-trace-bin", func(t *testing.T) { @@ -419,13 +421,13 @@ func TestGRPCTraceStreamServerInterceptor(t *testing.T) { } err := interceptor(nil, &fakeStream{ctx}, fakeInfo, assertHandler) - assert.NoError(t, err) + require.NoError(t, err) sc := span.SpanContext() traceID := sc.TraceID() spanID := sc.SpanID() - assert.NotEmpty(t, fmt.Sprintf("%x", traceID[:])) - assert.NotEmpty(t, fmt.Sprintf("%x", spanID[:])) + assert.NotEmpty(t, hex.EncodeToString(traceID[:])) + assert.NotEmpty(t, hex.EncodeToString(spanID[:])) }) }) } diff --git a/pkg/diagnostics/http_monitoring_test.go b/pkg/diagnostics/http_monitoring_test.go index ac311bd6875..4a0a3dfa5f9 100644 --- a/pkg/diagnostics/http_monitoring_test.go +++ b/pkg/diagnostics/http_monitoring_test.go @@ -34,7 +34,7 @@ func TestHTTPMiddleware(t *testing.T) { // assert rows, err := view.RetrieveData("http/server/request_count") require.NoError(t, err) - assert.Equal(t, 1, len(rows)) + assert.Len(t, rows, 1) assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name()) assert.Equal(t, "fakeID", rows[0].Tags[0].Value) assert.Equal(t, "status", rows[0].Tags[1].Key.Name()) @@ -42,20 +42,20 @@ func TestHTTPMiddleware(t *testing.T) { rows, err = view.RetrieveData("http/server/request_bytes") require.NoError(t, err) - assert.Equal(t, 1, len(rows)) + assert.Len(t, rows, 
1) assert.Equal(t, "app_id", rows[0].Tags[0].Key.Name()) assert.Equal(t, "fakeID", rows[0].Tags[0].Value) - assert.Equal(t, float64(len(requestBody)), (rows[0].Data).(*view.DistributionData).Min) + assert.InEpsilon(t, float64(len(requestBody)), (rows[0].Data).(*view.DistributionData).Min, 0) rows, err = view.RetrieveData("http/server/response_bytes") require.NoError(t, err) - assert.Equal(t, 1, len(rows)) - assert.Equal(t, float64(len(responseBody)), (rows[0].Data).(*view.DistributionData).Min) + assert.Len(t, rows, 1) + assert.InEpsilon(t, float64(len(responseBody)), (rows[0].Data).(*view.DistributionData).Min, 0) rows, err = view.RetrieveData("http/server/latency") require.NoError(t, err) - assert.Equal(t, 1, len(rows)) - assert.True(t, (rows[0].Data).(*view.DistributionData).Min >= 100.0) + assert.Len(t, rows, 1) + assert.GreaterOrEqual(t, (rows[0].Data).(*view.DistributionData).Min, 100.0) } func TestHTTPMiddlewareWhenMetricsDisabled(t *testing.T) { @@ -83,7 +83,7 @@ func TestHTTPMiddlewareWhenMetricsDisabled(t *testing.T) { // assert rows, err := view.RetrieveData("http/server/request_count") - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, rows) } diff --git a/pkg/diagnostics/http_tracing_test.go b/pkg/diagnostics/http_tracing_test.go index 049f10fac72..297c00e3b60 100644 --- a/pkg/diagnostics/http_tracing_test.go +++ b/pkg/diagnostics/http_tracing_test.go @@ -15,6 +15,7 @@ package diagnostics import ( "context" + "encoding/hex" "fmt" "net/http" "net/http/httptest" @@ -110,7 +111,7 @@ func TestUserDefinedHTTPHeaders(t *testing.T) { m := userDefinedHTTPHeaders(req) - assert.Equal(t, 2, len(m)) + assert.Len(t, m, 2) assert.Equal(t, "value1", m["dapr-userdefined-1"]) assert.Equal(t, "value2", m["dapr-userdefined-2"]) } @@ -135,7 +136,7 @@ func TestSpanContextToHTTPHeaders(t *testing.T) { got := SpanContextFromRequest(req) - assert.Equalf(t, got, wantSc, "SpanContextToHTTPHeaders() got = %v, want %v", got, wantSc) + assert.Equalf(t, wantSc, got, "SpanContextToHTTPHeaders() got = %v, want %v", got, wantSc) }) } @@ -216,7 +217,7 @@ func TestSpanContextToResponse(t *testing.T) { h := resp.Header().Get("traceparent") got, _ := SpanContextFromW3CString(h) - assert.Equalf(t, got, wantSc, "SpanContextToResponse() got = %v, want %v", got, wantSc) + assert.Equalf(t, wantSc, got, "SpanContextToResponse() got = %v, want %v", got, wantSc) }) } } @@ -275,8 +276,8 @@ func TestHTTPTraceMiddleware(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() spanID := sc.SpanID() - assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", fmt.Sprintf("%x", traceID[:])) - assert.NotEqual(t, "00f067aa0ba902b7", fmt.Sprintf("%x", spanID[:])) + assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:])) + assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:])) }) t.Run("traceparent is not given in request", func(t *testing.T) { @@ -292,8 +293,8 @@ func TestHTTPTraceMiddleware(t *testing.T) { sc := span.SpanContext() traceID := sc.TraceID() spanID := sc.SpanID() - assert.NotEmpty(t, fmt.Sprintf("%x", traceID[:])) - assert.NotEmpty(t, fmt.Sprintf("%x", spanID[:])) + assert.NotEmpty(t, hex.EncodeToString(traceID[:])) + assert.NotEmpty(t, hex.EncodeToString(spanID[:])) }) t.Run("traceparent not given in response", func(t *testing.T) { @@ -352,8 +353,8 @@ func TestTraceStatusFromHTTPCode(t *testing.T) { for _, tt := range tests { t.Run("traceStatusFromHTTPCode", func(t *testing.T) { gotOtelCode, gotOtelCodeDescription := traceStatusFromHTTPCode(tt.httpCode) - 
assert.Equalf(t, gotOtelCode, tt.wantOtelCode, "traceStatusFromHTTPCode(%v) got = %v, want %v", tt.httpCode, gotOtelCode, tt.wantOtelCode) - assert.Equalf(t, gotOtelCodeDescription, tt.wantOtelCodeDescription, "traceStatusFromHTTPCode(%v) got = %v, want %v", tt.httpCode, gotOtelCodeDescription, tt.wantOtelCodeDescription) + assert.Equalf(t, tt.wantOtelCode, gotOtelCode, "traceStatusFromHTTPCode(%v) got = %v, want %v", tt.httpCode, gotOtelCode, tt.wantOtelCode) + assert.Equalf(t, tt.wantOtelCodeDescription, gotOtelCodeDescription, "traceStatusFromHTTPCode(%v) got = %v, want %v", tt.httpCode, gotOtelCodeDescription, tt.wantOtelCodeDescription) }) } } diff --git a/pkg/diagnostics/resiliency_monitoring_test.go b/pkg/diagnostics/resiliency_monitoring_test.go index 244b1041a45..74c233d964f 100644 --- a/pkg/diagnostics/resiliency_monitoring_test.go +++ b/pkg/diagnostics/resiliency_monitoring_test.go @@ -192,7 +192,7 @@ func TestResiliencyCountMonitoring(t *testing.T) { require.Error(t, err) } require.NoError(t, err) - require.Equal(t, test.wantNumberOfRows, len(rows)) + require.Len(t, rows, test.wantNumberOfRows) for _, wantTag := range test.wantTags { diag.RequireTagExist(t, rows, wantTag) } @@ -275,7 +275,7 @@ func TestResiliencyCountMonitoringCBStates(t *testing.T) { test.unitFn() rows, err := view.RetrieveData(resiliencyCountViewName) require.NoError(t, err) - require.Equal(t, test.wantNumberOfRows, len(rows)) + require.Len(t, rows, test.wantNumberOfRows) wantedTags := []tag.Tag{ diag.NewTag("app_id", testAppID), @@ -443,7 +443,7 @@ func TestResiliencyActivationsCountMonitoring(t *testing.T) { test.unitFn() rows, err := view.RetrieveData(resiliencyActivationViewName) require.NoError(t, err) - require.Equal(t, test.wantNumberOfRows, len(rows)) + require.Len(t, rows, test.wantNumberOfRows) if test.wantNumberOfRows == 0 { return } @@ -502,7 +502,7 @@ func TestResiliencyLoadedMonitoring(t *testing.T) { rows, err := view.RetrieveData(resiliencyLoadedViewName) require.NoError(t, err) - require.Equal(t, 1, len(rows)) + require.Len(t, rows, 1) diag.RequireTagExist(t, rows, diag.NewTag("app_id", testAppID)) diag.RequireTagExist(t, rows, diag.NewTag("name", testResiliencyName)) diff --git a/pkg/diagnostics/service_monitoring_test.go b/pkg/diagnostics/service_monitoring_test.go index 05c10fdb6bb..67f5095736f 100644 --- a/pkg/diagnostics/service_monitoring_test.go +++ b/pkg/diagnostics/service_monitoring_test.go @@ -82,7 +82,7 @@ func TestServiceInvocation(t *testing.T) { func TestSerivceMonitoringInit(t *testing.T) { c := servicesMetrics() assert.True(t, c.enabled) - assert.Equal(t, c.appID, "testAppId") + assert.Equal(t, "testAppId", c.appID) } // export for diagnostics_test package only unexported keys diff --git a/pkg/diagnostics/tracing_test.go b/pkg/diagnostics/tracing_test.go index cefd5b0f0f3..a2dc4821b7d 100644 --- a/pkg/diagnostics/tracing_test.go +++ b/pkg/diagnostics/tracing_test.go @@ -15,6 +15,7 @@ package diagnostics import ( "context" + "encoding/hex" "fmt" "math/rand" "sync" @@ -138,8 +139,8 @@ func TestStartInternalCallbackSpan(t *testing.T) { sc := gotSp.SpanContext() traceID := sc.TraceID() spanID := sc.SpanID() - assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", fmt.Sprintf("%x", traceID[:])) - assert.NotEqual(t, "00f067aa0ba902b7", fmt.Sprintf("%x", spanID[:])) + assert.Equal(t, "4bf92f3577b34da6a3ce929d0e0e4736", hex.EncodeToString(traceID[:])) + assert.NotEqual(t, "00f067aa0ba902b7", hex.EncodeToString(spanID[:])) }) t.Run("traceparent is provided with sampling flag = 1 but 
sampling is disabled", func(t *testing.T) { @@ -164,20 +165,20 @@ func TestStartInternalCallbackSpan(t *testing.T) { const expectSampled = 1051 const numTraces = 100000 sampledCount := runTraces(t, "test_trace", numTraces, "0.01", 0) - require.Equal(t, sampledCount, expectSampled, "Expected to sample %d traces but sampled %d", expectSampled, sampledCount) + require.Equal(t, expectSampled, sampledCount, "Expected to sample %d traces but sampled %d", expectSampled, sampledCount) require.Less(t, sampledCount, numTraces, "Expected to sample fewer than the total number of traces, but sampled all of them!") }) t.Run("traceparent is provided with sampling flag = 0 and sampling is enabled (and P=1.00)", func(t *testing.T) { const numTraces = 1000 sampledCount := runTraces(t, "test_trace", numTraces, "1.00", 0) - require.Equal(t, sampledCount, numTraces, "Expected to sample all traces (%d) but only sampled %d", numTraces, sampledCount) + require.Equal(t, numTraces, sampledCount, "Expected to sample all traces (%d) but only sampled %d", numTraces, sampledCount) }) t.Run("traceparent is provided with sampling flag = 1 and sampling is enabled (but not P=1.00)", func(t *testing.T) { const numTraces = 1000 sampledCount := runTraces(t, "test_trace", numTraces, "0.00001", 1) - require.Equal(t, sampledCount, numTraces, "Expected to sample all traces (%d) but only sampled %d", numTraces, sampledCount) + require.Equal(t, numTraces, sampledCount, "Expected to sample all traces (%d) but only sampled %d", numTraces, sampledCount) }) } diff --git a/pkg/diagnostics/utils/metrics_utils_test.go b/pkg/diagnostics/utils/metrics_utils_test.go index 87008b3d775..8a026d4d53d 100644 --- a/pkg/diagnostics/utils/metrics_utils_test.go +++ b/pkg/diagnostics/utils/metrics_utils_test.go @@ -76,7 +76,7 @@ func TestCreateRulesMap(t *testing.T) { }, }, }) - assert.Error(t, err) + require.Error(t, err) }) t.Run("valid rule", func(t *testing.T) { diff --git a/pkg/encryption/encryption_test.go b/pkg/encryption/encryption_test.go index 28e8eb384f4..19566db3154 100644 --- a/pkg/encryption/encryption_test.go +++ b/pkg/encryption/encryption_test.go @@ -25,6 +25,7 @@ import ( "github.com/dapr/dapr/pkg/apis/components/v1alpha1" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -101,7 +102,7 @@ func TestComponentEncryptionKey(t *testing.T) { }}) keys, err := ComponentEncryptionKey(component, secretStore) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, primaryKey, keys.Primary.Key) assert.Equal(t, secondaryKey, keys.Secondary.Key) }) @@ -132,7 +133,7 @@ func TestComponentEncryptionKey(t *testing.T) { keys, err := ComponentEncryptionKey(component, nil) assert.Empty(t, keys.Primary.Key) assert.Empty(t, keys.Secondary.Key) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("no error when component doesn't have encryption keys", func(t *testing.T) { @@ -150,7 +151,7 @@ func TestComponentEncryptionKey(t *testing.T) { } _, err := ComponentEncryptionKey(component, nil) - assert.NoError(t, err) + require.NoError(t, err) }) } @@ -165,7 +166,7 @@ func TestTryGetEncryptionKeyFromMetadataItem(t *testing.T) { }}) _, err := tryGetEncryptionKeyFromMetadataItem("", commonapi.NameValuePair{}, secretStore) - assert.Error(t, err) + require.Error(t, err) }) } @@ -176,7 +177,7 @@ func TestCreateCipher(t *testing.T) { }, AESGCMAlgorithm) assert.Nil(t, cipherObj) - assert.Error(t, err) + require.Error(t, err) }) t.Run("valid 256-bit key", func(t *testing.T) { 
@@ -190,7 +191,7 @@ func TestCreateCipher(t *testing.T) { }, AESGCMAlgorithm) assert.NotNil(t, cipherObj) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("valid 192-bit key", func(t *testing.T) { @@ -204,7 +205,7 @@ func TestCreateCipher(t *testing.T) { }, AESGCMAlgorithm) assert.NotNil(t, cipherObj) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("valid 128-bit key", func(t *testing.T) { @@ -218,7 +219,7 @@ func TestCreateCipher(t *testing.T) { }, AESGCMAlgorithm) assert.NotNil(t, cipherObj) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("invalid key size", func(t *testing.T) { @@ -232,7 +233,7 @@ func TestCreateCipher(t *testing.T) { }, AESGCMAlgorithm) assert.Nil(t, cipherObj) - assert.Error(t, err) + require.Error(t, err) }) t.Run("invalid algorithm", func(t *testing.T) { @@ -246,6 +247,6 @@ func TestCreateCipher(t *testing.T) { }, "3DES") assert.Nil(t, cipherObj) - assert.Error(t, err) + require.Error(t, err) }) } diff --git a/pkg/encryption/state_test.go b/pkg/encryption/state_test.go index 09f8aad318f..33ba2887bbd 100644 --- a/pkg/encryption/state_test.go +++ b/pkg/encryption/state_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAddEncryptedStateStore(t *testing.T) { @@ -89,11 +90,11 @@ func TestTryEncryptValue(t *testing.T) { v := []byte("hello") r, err := TryEncryptValue("test", v) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEqual(t, v, r) dr, err := TryDecryptValue("test", r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, v, dr) }) @@ -121,7 +122,7 @@ func TestTryEncryptValue(t *testing.T) { v := []byte("hello") r, err := TryEncryptValue("test", v) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEqual(t, v, r) encryptedStateStores = map[string]ComponentEncryptionKeys{} @@ -130,7 +131,7 @@ func TestTryEncryptValue(t *testing.T) { }) dr, err := TryDecryptValue("test", r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, v, dr) }) @@ -159,11 +160,11 @@ func TestTryEncryptValue(t *testing.T) { s := base64.StdEncoding.EncodeToString(v) r, err := TryEncryptValue("test", []byte(s)) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEqual(t, v, r) dr, err := TryDecryptValue("test", r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte(s), dr) }) @@ -191,11 +192,11 @@ func TestTryEncryptValue(t *testing.T) { v := []byte("hello world") r, err := TryEncryptValue("test", v) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEqual(t, v, r) dr, err := TryDecryptValue("test", r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, v, dr) }) } @@ -223,7 +224,7 @@ func TestTryDecryptValue(t *testing.T) { }) dr, err := TryDecryptValue("test", nil) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, dr) }) } diff --git a/pkg/expr/expr_test.go b/pkg/expr/expr_test.go index 1e1dec7589d..4544753d242 100644 --- a/pkg/expr/expr_test.go +++ b/pkg/expr/expr_test.go @@ -24,7 +24,7 @@ func TestEval(t *testing.T) { }, }) require.NoError(t, err) - assert.Equal(t, true, result) + assert.True(t, result.(bool)) } func TestJSONMarshal(t *testing.T) { @@ -42,7 +42,7 @@ func TestEmptyProgramNoPanic(t *testing.T) { r, err := e.Eval(map[string]interface{}{}) assert.Nil(t, r) - assert.NotNil(t, err) + require.Error(t, err) } var result interface{} diff --git a/pkg/grpc/api.go b/pkg/grpc/api.go index 50523149a71..523cda95d32 100644 --- a/pkg/grpc/api.go +++ 
b/pkg/grpc/api.go @@ -146,15 +146,15 @@ func (a *api) validateAndGetPubsubAndTopic(pubsubName, topic string, reqMeta map } func (a *api) PublishEvent(ctx context.Context, in *runtimev1pb.PublishEventRequest) (*emptypb.Empty, error) { - thepubsub, pubsubName, topic, rawPayload, validationErr := a.validateAndGetPubsubAndTopic(in.PubsubName, in.Topic, in.Metadata) + thepubsub, pubsubName, topic, rawPayload, validationErr := a.validateAndGetPubsubAndTopic(in.GetPubsubName(), in.GetTopic(), in.GetMetadata()) if validationErr != nil { apiServerLogger.Debug(validationErr) return &emptypb.Empty{}, validationErr } body := []byte{} - if in.Data != nil { - body = in.Data + if in.GetData() != nil { + body = in.GetData() } data := body @@ -165,13 +165,13 @@ func (a *api) PublishEvent(ctx context.Context, in *runtimev1pb.PublishEventRequ envelope, err := runtimePubsub.NewCloudEvent(&runtimePubsub.CloudEvent{ Source: a.UniversalAPI.AppID, - Topic: in.Topic, - DataContentType: in.DataContentType, + Topic: in.GetTopic(), + DataContentType: in.GetDataContentType(), Data: body, TraceID: corID, TraceState: traceState, - Pubsub: in.PubsubName, - }, in.Metadata) + Pubsub: in.GetPubsubName(), + }, in.GetMetadata()) if err != nil { err = status.Errorf(codes.InvalidArgument, messages.ErrPubsubCloudEventCreation, err.Error()) apiServerLogger.Debug(err) @@ -179,7 +179,7 @@ func (a *api) PublishEvent(ctx context.Context, in *runtimev1pb.PublishEventRequ } features := thepubsub.Features() - pubsub.ApplyMetadata(envelope, features, in.Metadata) + pubsub.ApplyMetadata(envelope, features, in.GetMetadata()) data, err = json.Marshal(envelope) if err != nil { @@ -193,7 +193,7 @@ func (a *api) PublishEvent(ctx context.Context, in *runtimev1pb.PublishEventRequ PubsubName: pubsubName, Topic: topic, Data: data, - Metadata: in.Metadata, + Metadata: in.GetMetadata(), } start := time.Now() @@ -242,7 +242,7 @@ func (a *api) InvokeService(ctx context.Context, in *runtimev1pb.InvokeServiceRe if invokeServiceDeprecationNoticeShown.CompareAndSwap(false, true) { apiServerLogger.Warn("[DEPRECATION NOTICE] InvokeService is deprecated and will be removed in the future, please use proxy mode instead.") } - policyDef := a.UniversalAPI.Resiliency.EndpointPolicy(in.Id, in.Id+":"+in.Message.Method) + policyDef := a.UniversalAPI.Resiliency.EndpointPolicy(in.GetId(), in.GetId()+":"+in.GetMessage().GetMethod()) req := invokev1.FromInvokeRequestMessage(in.GetMessage()) if policyDef != nil { @@ -257,13 +257,13 @@ func (a *api) InvokeService(ctx context.Context, in *runtimev1pb.InvokeServiceRe policyRunner := resiliency.NewRunner[*invokeServiceResp](ctx, policyDef) resp, err := policyRunner(func(ctx context.Context) (*invokeServiceResp, error) { rResp := &invokeServiceResp{} - imr, rErr := a.directMessaging.Invoke(ctx, in.Id, req) + imr, rErr := a.directMessaging.Invoke(ctx, in.GetId(), req) if imr != nil { // Read the entire message in memory then close imr pd, pdErr := imr.ProtoWithData() imr.Close() if pd != nil { - rResp.message = pd.Message + rResp.message = pd.GetMessage() } // If we have an error, set it only if rErr is not already set @@ -272,7 +272,7 @@ func (a *api) InvokeService(ctx context.Context, in *runtimev1pb.InvokeServiceRe } } if rErr != nil { - return rResp, messages.ErrDirectInvoke.WithFormat(in.Id, rErr) + return rResp, messages.ErrDirectInvoke.WithFormat(in.GetId(), rErr) } rResp.headers = invokev1.InternalMetadataToGrpcMetadata(ctx, imr.Headers(), true) @@ -282,10 +282,10 @@ func (a *api) InvokeService(ctx context.Context, in 
*runtimev1pb.InvokeServiceRe apiServerLogger.Warn("[DEPRECATION NOTICE] Invocation path of gRPC -> HTTP is deprecated and will be removed in the future.") } var errorMessage string - if rResp.message != nil && rResp.message.Data != nil { - errorMessage = string(rResp.message.Data.Value) + if rResp.message != nil && rResp.message.GetData() != nil { + errorMessage = string(rResp.message.GetData().GetValue()) } - code := int(imr.Status().Code) + code := int(imr.Status().GetCode()) // If the status is OK, will be nil rErr = invokev1.ErrorFromHTTPResponseCode(code, errorMessage) // Populate http status code to header @@ -324,7 +324,7 @@ func (a *api) InvokeService(ctx context.Context, in *runtimev1pb.InvokeServiceRe } func (a *api) BulkPublishEventAlpha1(ctx context.Context, in *runtimev1pb.BulkPublishRequest) (*runtimev1pb.BulkPublishResponse, error) { - thepubsub, pubsubName, topic, rawPayload, validationErr := a.validateAndGetPubsubAndTopic(in.PubsubName, in.Topic, in.Metadata) + thepubsub, pubsubName, topic, rawPayload, validationErr := a.validateAndGetPubsubAndTopic(in.GetPubsubName(), in.GetTopic(), in.GetMetadata()) if validationErr != nil { apiServerLogger.Debug(validationErr) return &runtimev1pb.BulkPublishResponse{}, validationErr @@ -342,24 +342,24 @@ func (a *api) BulkPublishEventAlpha1(ctx context.Context, in *runtimev1pb.BulkPu } features := thepubsub.Features() - entryIdSet := make(map[string]struct{}, len(in.Entries)) //nolint:stylecheck + entryIdSet := make(map[string]struct{}, len(in.GetEntries())) //nolint:stylecheck - entries := make([]pubsub.BulkMessageEntry, len(in.Entries)) - for i, entry := range in.Entries { + entries := make([]pubsub.BulkMessageEntry, len(in.GetEntries())) + for i, entry := range in.GetEntries() { // Validate entry_id - if _, ok := entryIdSet[entry.EntryId]; ok || entry.EntryId == "" { - err := status.Errorf(codes.InvalidArgument, messages.ErrPubsubMarshal, in.Topic, in.PubsubName, "entryId is duplicated or not present for entry") + if _, ok := entryIdSet[entry.GetEntryId()]; ok || entry.GetEntryId() == "" { + err := status.Errorf(codes.InvalidArgument, messages.ErrPubsubMarshal, in.GetTopic(), in.GetPubsubName(), "entryId is duplicated or not present for entry") apiServerLogger.Debug(err) return &runtimev1pb.BulkPublishResponse{}, err } - entryIdSet[entry.EntryId] = struct{}{} - entries[i].EntryId = entry.EntryId - entries[i].ContentType = entry.ContentType - entries[i].Event = entry.Event + entryIdSet[entry.GetEntryId()] = struct{}{} + entries[i].EntryId = entry.GetEntryId() + entries[i].ContentType = entry.GetContentType() + entries[i].Event = entry.GetEvent() // Populate entry metadata with request level metadata. Entry level metadata keys // override request level metadata. 
- if entry.Metadata != nil { - entries[i].Metadata = utils.PopulateMetadataForBulkPublishEntry(in.Metadata, entry.Metadata) + if entry.GetMetadata() != nil { + entries[i].Metadata = utils.PopulateMetadataForBulkPublishEntry(in.GetMetadata(), entry.GetMetadata()) } if !rawPayload { @@ -403,7 +403,7 @@ func (a *api) BulkPublishEventAlpha1(ctx context.Context, in *runtimev1pb.BulkPu PubsubName: pubsubName, Topic: topic, Entries: entries, - Metadata: in.Metadata, + Metadata: in.GetMetadata(), } start := time.Now() @@ -445,7 +445,7 @@ func (a *api) BulkPublishEventAlpha1(ctx context.Context, in *runtimev1pb.BulkPu if r.Error != nil { resEntry.Error = r.Error.Error() } - bulkRes.FailedEntries = append(bulkRes.FailedEntries, &resEntry) + bulkRes.FailedEntries = append(bulkRes.GetFailedEntries(), &resEntry) } closeChildSpans(ctx, nil) // even on partial failures, err is nil. As when error is set, the response is expected to not be processed. @@ -454,11 +454,11 @@ func (a *api) BulkPublishEventAlpha1(ctx context.Context, in *runtimev1pb.BulkPu func (a *api) InvokeBinding(ctx context.Context, in *runtimev1pb.InvokeBindingRequest) (*runtimev1pb.InvokeBindingResponse, error) { req := &bindings.InvokeRequest{ - Metadata: make(map[string]string, len(in.Metadata)), - Operation: bindings.OperationKind(in.Operation), - Data: in.Data, + Metadata: make(map[string]string, len(in.GetMetadata())), + Operation: bindings.OperationKind(in.GetOperation()), + Data: in.GetData(), } - for key, val := range in.Metadata { + for key, val := range in.GetMetadata() { req.Metadata[key] = val } @@ -472,13 +472,13 @@ func (a *api) InvokeBinding(ctx context.Context, in *runtimev1pb.InvokeBindingRe r := &runtimev1pb.InvokeBindingResponse{} start := time.Now() - resp, err := a.sendToOutputBindingFn(ctx, in.Name, req) + resp, err := a.sendToOutputBindingFn(ctx, in.GetName(), req) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.OutputBindingEvent(context.Background(), in.Name, in.Operation, err == nil, elapsed) + diag.DefaultComponentMonitoring.OutputBindingEvent(context.Background(), in.GetName(), in.GetOperation(), err == nil, elapsed) if err != nil { - err = status.Errorf(codes.Internal, messages.ErrInvokeOutputBinding, in.Name, err.Error()) + err = status.Errorf(codes.Internal, messages.ErrInvokeOutputBinding, in.GetName(), err.Error()) apiServerLogger.Debug(err) return r, err } @@ -492,41 +492,41 @@ func (a *api) InvokeBinding(ctx context.Context, in *runtimev1pb.InvokeBindingRe func (a *api) GetBulkState(ctx context.Context, in *runtimev1pb.GetBulkStateRequest) (*runtimev1pb.GetBulkStateResponse, error) { bulkResp := &runtimev1pb.GetBulkStateResponse{} - store, err := a.UniversalAPI.GetStateStore(in.StoreName) + store, err := a.UniversalAPI.GetStateStore(in.GetStoreName()) if err != nil { // Error has already been logged return bulkResp, err } - if len(in.Keys) == 0 { + if len(in.GetKeys()) == 0 { return bulkResp, nil } var key string - reqs := make([]state.GetRequest, len(in.Keys)) - for i, k := range in.Keys { - key, err = stateLoader.GetModifiedStateKey(k, in.StoreName, a.UniversalAPI.AppID) + reqs := make([]state.GetRequest, len(in.GetKeys())) + for i, k := range in.GetKeys() { + key, err = stateLoader.GetModifiedStateKey(k, in.GetStoreName(), a.UniversalAPI.AppID) if err != nil { return &runtimev1pb.GetBulkStateResponse{}, err } r := state.GetRequest{ Key: key, - Metadata: in.Metadata, + Metadata: in.GetMetadata(), } reqs[i] = r } start := time.Now() - policyDef := 
a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Statestore) + policyDef := a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Statestore) bgrPolicyRunner := resiliency.NewRunner[[]state.BulkGetResponse](ctx, policyDef) responses, err := bgrPolicyRunner(func(ctx context.Context) ([]state.BulkGetResponse, error) { return store.BulkGet(ctx, reqs, state.BulkGetOpts{ - Parallelism: int(in.Parallelism), + Parallelism: int(in.GetParallelism()), }) }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.StateInvoked(ctx, in.StoreName, diag.BulkGet, err == nil, elapsed) + diag.DefaultComponentMonitoring.StateInvoked(ctx, in.GetStoreName(), diag.BulkGet, err == nil, elapsed) if err != nil { return bulkResp, err @@ -544,14 +544,14 @@ func (a *api) GetBulkState(ctx context.Context, in *runtimev1pb.GetBulkStateRequ bulkResp.Items[i] = item } - if encryption.EncryptedStateStore(in.StoreName) { - for i := range bulkResp.Items { - if bulkResp.Items[i].Error != "" || len(bulkResp.Items[i].Data) == 0 { + if encryption.EncryptedStateStore(in.GetStoreName()) { + for i := range bulkResp.GetItems() { + if bulkResp.GetItems()[i].GetError() != "" || len(bulkResp.GetItems()[i].GetData()) == 0 { bulkResp.Items[i].Data = nil continue } - val, err := encryption.TryDecryptValue(in.StoreName, bulkResp.Items[i].Data) + val, err := encryption.TryDecryptValue(in.GetStoreName(), bulkResp.GetItems()[i].GetData()) if err != nil { apiServerLogger.Debugf("Bulk get error: %v", err) bulkResp.Items[i].Data = nil @@ -567,36 +567,36 @@ func (a *api) GetBulkState(ctx context.Context, in *runtimev1pb.GetBulkStateRequ } func (a *api) GetState(ctx context.Context, in *runtimev1pb.GetStateRequest) (*runtimev1pb.GetStateResponse, error) { - store, err := a.UniversalAPI.GetStateStore(in.StoreName) + store, err := a.UniversalAPI.GetStateStore(in.GetStoreName()) if err != nil { // Error has already been logged return &runtimev1pb.GetStateResponse{}, err } - key, err := stateLoader.GetModifiedStateKey(in.Key, in.StoreName, a.UniversalAPI.AppID) + key, err := stateLoader.GetModifiedStateKey(in.GetKey(), in.GetStoreName(), a.UniversalAPI.AppID) if err != nil { return &runtimev1pb.GetStateResponse{}, err } req := &state.GetRequest{ Key: key, - Metadata: in.Metadata, + Metadata: in.GetMetadata(), Options: state.GetStateOption{ - Consistency: stateConsistencyToString(in.Consistency), + Consistency: stateConsistencyToString(in.GetConsistency()), }, } start := time.Now() policyRunner := resiliency.NewRunner[*state.GetResponse](ctx, - a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Statestore), + a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Statestore), ) getResponse, err := policyRunner(func(ctx context.Context) (*state.GetResponse, error) { return store.Get(ctx, req) }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.StateInvoked(ctx, in.StoreName, diag.Get, err == nil, elapsed) + diag.DefaultComponentMonitoring.StateInvoked(ctx, in.GetStoreName(), diag.Get, err == nil, elapsed) if err != nil { - err = status.Errorf(codes.Internal, messages.ErrStateGet, in.Key, in.StoreName, err.Error()) + err = status.Errorf(codes.Internal, messages.ErrStateGet, in.GetKey(), in.GetStoreName(), err.Error()) a.UniversalAPI.Logger.Debug(err) return &runtimev1pb.GetStateResponse{}, err } @@ -604,10 +604,10 @@ func (a *api) GetState(ctx context.Context, in *runtimev1pb.GetStateRequest) (*r if getResponse == nil { 
getResponse = &state.GetResponse{} } - if encryption.EncryptedStateStore(in.StoreName) { - val, err := encryption.TryDecryptValue(in.StoreName, getResponse.Data) + if encryption.EncryptedStateStore(in.GetStoreName()) { + val, err := encryption.TryDecryptValue(in.GetStoreName(), getResponse.Data) if err != nil { - err = status.Errorf(codes.Internal, messages.ErrStateGet, in.Key, in.StoreName, err.Error()) + err = status.Errorf(codes.Internal, messages.ErrStateGet, in.GetKey(), in.GetStoreName(), err.Error()) a.UniversalAPI.Logger.Debug(err) return &runtimev1pb.GetStateResponse{}, err } @@ -627,53 +627,53 @@ func (a *api) GetState(ctx context.Context, in *runtimev1pb.GetStateRequest) (*r func (a *api) SaveState(ctx context.Context, in *runtimev1pb.SaveStateRequest) (*emptypb.Empty, error) { empty := &emptypb.Empty{} - store, err := a.UniversalAPI.GetStateStore(in.StoreName) + store, err := a.UniversalAPI.GetStateStore(in.GetStoreName()) if err != nil { // Error has already been logged return empty, err } - l := len(in.States) + l := len(in.GetStates()) if l == 0 { return empty, nil } reqs := make([]state.SetRequest, l) - for i, s := range in.States { - if len(s.Key) == 0 { + for i, s := range in.GetStates() { + if len(s.GetKey()) == 0 { return empty, status.Errorf(codes.InvalidArgument, "state key cannot be empty") } var key string - key, err = stateLoader.GetModifiedStateKey(s.Key, in.StoreName, a.UniversalAPI.AppID) + key, err = stateLoader.GetModifiedStateKey(s.GetKey(), in.GetStoreName(), a.UniversalAPI.AppID) if err != nil { return empty, err } req := state.SetRequest{ Key: key, - Metadata: s.Metadata, + Metadata: s.GetMetadata(), } if req.Metadata[contribMetadata.ContentType] == contenttype.JSONContentType { - err = json.Unmarshal(s.Value, &req.Value) + err = json.Unmarshal(s.GetValue(), &req.Value) if err != nil { return empty, err } } else { - req.Value = s.Value + req.Value = s.GetValue() } - if s.Etag != nil { + if s.GetEtag() != nil { req.ETag = &s.Etag.Value } - if s.Options != nil { + if s.GetOptions() != nil { req.Options = state.SetStateOption{ - Consistency: stateConsistencyToString(s.Options.Consistency), - Concurrency: stateConcurrencyToString(s.Options.Concurrency), + Consistency: stateConsistencyToString(s.GetOptions().GetConsistency()), + Concurrency: stateConcurrencyToString(s.GetOptions().GetConcurrency()), } } - if encryption.EncryptedStateStore(in.StoreName) { - val, encErr := encryption.TryEncryptValue(in.StoreName, s.Value) + if encryption.EncryptedStateStore(in.GetStoreName()) { + val, encErr := encryption.TryEncryptValue(in.GetStoreName(), s.GetValue()) if encErr != nil { a.UniversalAPI.Logger.Debug(encErr) return empty, encErr @@ -687,17 +687,17 @@ func (a *api) SaveState(ctx context.Context, in *runtimev1pb.SaveStateRequest) ( start := time.Now() err = stateLoader.PerformBulkStoreOperation(ctx, reqs, - a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Statestore), + a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Statestore), state.BulkStoreOpts{}, store.Set, store.BulkSet, ) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.StateInvoked(ctx, in.StoreName, diag.Set, err == nil, elapsed) + diag.DefaultComponentMonitoring.StateInvoked(ctx, in.GetStoreName(), diag.Set, err == nil, elapsed) if err != nil { - err = a.stateErrorResponse(err, messages.ErrStateSave, in.StoreName, err.Error()) + err = a.stateErrorResponse(err, messages.ErrStateSave, in.GetStoreName(), err.Error()) 
a.UniversalAPI.Logger.Debug(err) return empty, err } @@ -722,43 +722,43 @@ func (a *api) stateErrorResponse(err error, format string, args ...interface{}) func (a *api) DeleteState(ctx context.Context, in *runtimev1pb.DeleteStateRequest) (*emptypb.Empty, error) { empty := &emptypb.Empty{} - store, err := a.UniversalAPI.GetStateStore(in.StoreName) + store, err := a.UniversalAPI.GetStateStore(in.GetStoreName()) if err != nil { // Error has already been logged return empty, err } - key, err := stateLoader.GetModifiedStateKey(in.Key, in.StoreName, a.UniversalAPI.AppID) + key, err := stateLoader.GetModifiedStateKey(in.GetKey(), in.GetStoreName(), a.UniversalAPI.AppID) if err != nil { return empty, err } req := state.DeleteRequest{ Key: key, - Metadata: in.Metadata, + Metadata: in.GetMetadata(), } - if in.Etag != nil { + if in.GetEtag() != nil { req.ETag = &in.Etag.Value } - if in.Options != nil { + if in.GetOptions() != nil { req.Options = state.DeleteStateOption{ - Concurrency: stateConcurrencyToString(in.Options.Concurrency), - Consistency: stateConsistencyToString(in.Options.Consistency), + Concurrency: stateConcurrencyToString(in.GetOptions().GetConcurrency()), + Consistency: stateConsistencyToString(in.GetOptions().GetConsistency()), } } start := time.Now() policyRunner := resiliency.NewRunner[any](ctx, - a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Statestore), + a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Statestore), ) _, err = policyRunner(func(ctx context.Context) (any, error) { return nil, store.Delete(ctx, &req) }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.StateInvoked(ctx, in.StoreName, diag.Delete, err == nil, elapsed) + diag.DefaultComponentMonitoring.StateInvoked(ctx, in.GetStoreName(), diag.Delete, err == nil, elapsed) if err != nil { - err = a.stateErrorResponse(err, messages.ErrStateDelete, in.Key, err.Error()) + err = a.stateErrorResponse(err, messages.ErrStateDelete, in.GetKey(), err.Error()) a.UniversalAPI.Logger.Debug(err) return empty, err } @@ -768,29 +768,29 @@ func (a *api) DeleteState(ctx context.Context, in *runtimev1pb.DeleteStateReques func (a *api) DeleteBulkState(ctx context.Context, in *runtimev1pb.DeleteBulkStateRequest) (*emptypb.Empty, error) { empty := &emptypb.Empty{} - store, err := a.UniversalAPI.GetStateStore(in.StoreName) + store, err := a.UniversalAPI.GetStateStore(in.GetStoreName()) if err != nil { // Error has already been logged return empty, err } - reqs := make([]state.DeleteRequest, len(in.States)) - for i, item := range in.States { - key, err1 := stateLoader.GetModifiedStateKey(item.Key, in.StoreName, a.UniversalAPI.AppID) + reqs := make([]state.DeleteRequest, len(in.GetStates())) + for i, item := range in.GetStates() { + key, err1 := stateLoader.GetModifiedStateKey(item.GetKey(), in.GetStoreName(), a.UniversalAPI.AppID) if err1 != nil { return empty, err1 } req := state.DeleteRequest{ Key: key, - Metadata: item.Metadata, + Metadata: item.GetMetadata(), } - if item.Etag != nil { + if item.GetEtag() != nil { req.ETag = &item.Etag.Value } - if item.Options != nil { + if item.GetOptions() != nil { req.Options = state.DeleteStateOption{ - Concurrency: stateConcurrencyToString(item.Options.Concurrency), - Consistency: stateConsistencyToString(item.Options.Consistency), + Concurrency: stateConcurrencyToString(item.GetOptions().GetConcurrency()), + Consistency: stateConsistencyToString(item.GetOptions().GetConsistency()), } } reqs[i] = req @@ -798,17 +798,17 
@@ func (a *api) DeleteBulkState(ctx context.Context, in *runtimev1pb.DeleteBulkSta start := time.Now() err = stateLoader.PerformBulkStoreOperation(ctx, reqs, - a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Statestore), + a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Statestore), state.BulkStoreOpts{}, store.Delete, store.BulkDelete, ) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.StateInvoked(ctx, in.StoreName, diag.BulkDelete, err == nil, elapsed) + diag.DefaultComponentMonitoring.StateInvoked(ctx, in.GetStoreName(), diag.BulkDelete, err == nil, elapsed) if err != nil { - err = a.stateErrorResponse(err, messages.ErrStateDeleteBulk, in.StoreName, err.Error()) + err = a.stateErrorResponse(err, messages.ErrStateDeleteBulk, in.GetStoreName(), err.Error()) a.UniversalAPI.Logger.Debug(err) return empty, err } @@ -817,14 +817,14 @@ func (a *api) DeleteBulkState(ctx context.Context, in *runtimev1pb.DeleteBulkSta } func extractEtag(req *commonv1pb.StateItem) (bool, string) { - if req.Etag != nil { - return true, req.Etag.Value + if req.GetEtag() != nil { + return true, req.GetEtag().GetValue() } return false, "" } func (a *api) ExecuteStateTransaction(ctx context.Context, in *runtimev1pb.ExecuteStateTransactionRequest) (*emptypb.Empty, error) { - store, storeErr := a.UniversalAPI.GetStateStore(in.StoreName) + store, storeErr := a.UniversalAPI.GetStateStore(in.GetStoreName()) if storeErr != nil { // Error has already been logged return &emptypb.Empty{}, storeErr @@ -832,38 +832,38 @@ func (a *api) ExecuteStateTransaction(ctx context.Context, in *runtimev1pb.Execu transactionalStore, ok := store.(state.TransactionalStore) if !ok { - err := status.Errorf(codes.Unimplemented, messages.ErrStateStoreNotSupported, in.StoreName) + err := status.Errorf(codes.Unimplemented, messages.ErrStateStoreNotSupported, in.GetStoreName()) apiServerLogger.Debug(err) return &emptypb.Empty{}, err } - operations := make([]state.TransactionalStateOperation, 0, len(in.Operations)) - for _, inputReq := range in.Operations { - req := inputReq.Request + operations := make([]state.TransactionalStateOperation, 0, len(in.GetOperations())) + for _, inputReq := range in.GetOperations() { + req := inputReq.GetRequest() hasEtag, etag := extractEtag(req) - key, err := stateLoader.GetModifiedStateKey(req.Key, in.StoreName, a.UniversalAPI.AppID) + key, err := stateLoader.GetModifiedStateKey(req.GetKey(), in.GetStoreName(), a.UniversalAPI.AppID) if err != nil { return &emptypb.Empty{}, err } - switch state.OperationType(inputReq.OperationType) { + switch state.OperationType(inputReq.GetOperationType()) { case state.OperationUpsert: setReq := state.SetRequest{ Key: key, // Limitation: // components that cannot handle byte array need to deserialize/serialize in // component specific way in components-contrib repo. 
- Value: req.Value, - Metadata: req.Metadata, + Value: req.GetValue(), + Metadata: req.GetMetadata(), } if hasEtag { setReq.ETag = &etag } - if req.Options != nil { + if req.GetOptions() != nil { setReq.Options = state.SetStateOption{ - Concurrency: stateConcurrencyToString(req.Options.Concurrency), - Consistency: stateConsistencyToString(req.Options.Consistency), + Concurrency: stateConcurrencyToString(req.GetOptions().GetConcurrency()), + Consistency: stateConsistencyToString(req.GetOptions().GetConsistency()), } } @@ -872,23 +872,23 @@ func (a *api) ExecuteStateTransaction(ctx context.Context, in *runtimev1pb.Execu case state.OperationDelete: delReq := state.DeleteRequest{ Key: key, - Metadata: req.Metadata, + Metadata: req.GetMetadata(), } if hasEtag { delReq.ETag = &etag } - if req.Options != nil { + if req.GetOptions() != nil { delReq.Options = state.DeleteStateOption{ - Concurrency: stateConcurrencyToString(req.Options.Concurrency), - Consistency: stateConsistencyToString(req.Options.Consistency), + Concurrency: stateConcurrencyToString(req.GetOptions().GetConcurrency()), + Consistency: stateConsistencyToString(req.GetOptions().GetConsistency()), } } operations = append(operations, delReq) default: - err := status.Errorf(codes.Unimplemented, messages.ErrNotSupportedStateOperation, inputReq.OperationType) + err := status.Errorf(codes.Unimplemented, messages.ErrNotSupportedStateOperation, inputReq.GetOperationType()) apiServerLogger.Debug(err) return &emptypb.Empty{}, err } @@ -903,12 +903,12 @@ func (a *api) ExecuteStateTransaction(ctx context.Context, in *runtimev1pb.Execu } } - if encryption.EncryptedStateStore(in.StoreName) { + if encryption.EncryptedStateStore(in.GetStoreName()) { for i, op := range operations { switch req := op.(type) { case state.SetRequest: data := []byte(fmt.Sprintf("%v", req.Value)) - val, err := encryption.TryEncryptValue(in.StoreName, data) + val, err := encryption.TryEncryptValue(in.GetStoreName(), data) if err != nil { err = status.Errorf(codes.Internal, messages.ErrStateTransaction, err.Error()) apiServerLogger.Debug(err) @@ -921,11 +921,11 @@ func (a *api) ExecuteStateTransaction(ctx context.Context, in *runtimev1pb.Execu } } - outboxEnabled := a.pubsubAdapter.Outbox().Enabled(in.StoreName) + outboxEnabled := a.pubsubAdapter.Outbox().Enabled(in.GetStoreName()) if outboxEnabled { span := diagUtils.SpanFromContext(ctx) corID, traceState := diag.TraceIDAndStateFromSpan(span) - trs, err := a.pubsubAdapter.Outbox().PublishInternal(ctx, in.StoreName, operations, a.UniversalAPI.AppID, corID, traceState) + trs, err := a.pubsubAdapter.Outbox().PublishInternal(ctx, in.GetStoreName(), operations, a.UniversalAPI.AppID, corID, traceState) if err != nil { err = status.Errorf(codes.Internal, messages.ErrPublishOutbox, err.Error()) apiServerLogger.Debug(err) @@ -937,18 +937,18 @@ func (a *api) ExecuteStateTransaction(ctx context.Context, in *runtimev1pb.Execu start := time.Now() policyRunner := resiliency.NewRunner[struct{}](ctx, - a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Statestore), + a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Statestore), ) storeReq := &state.TransactionalStateRequest{ Operations: operations, - Metadata: in.Metadata, + Metadata: in.GetMetadata(), } _, err := policyRunner(func(ctx context.Context) (struct{}, error) { return struct{}{}, transactionalStore.Multi(ctx, storeReq) }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.StateInvoked(ctx, in.StoreName, 
diag.StateTransaction, err == nil, elapsed) + diag.DefaultComponentMonitoring.StateInvoked(ctx, in.GetStoreName(), diag.StateTransaction, err == nil, elapsed) if err != nil { err = status.Errorf(codes.Internal, messages.ErrStateTransaction, err.Error()) @@ -964,17 +964,17 @@ func (a *api) RegisterActorTimer(ctx context.Context, in *runtimev1pb.RegisterAc } req := &actors.CreateTimerRequest{ - Name: in.Name, - ActorID: in.ActorId, - ActorType: in.ActorType, - DueTime: in.DueTime, - Period: in.Period, - TTL: in.Ttl, - Callback: in.Callback, + Name: in.GetName(), + ActorID: in.GetActorId(), + ActorType: in.GetActorType(), + DueTime: in.GetDueTime(), + Period: in.GetPeriod(), + TTL: in.GetTtl(), + Callback: in.GetCallback(), } - if in.Data != nil { - j, err := json.Marshal(in.Data) + if in.GetData() != nil { + j, err := json.Marshal(in.GetData()) if err != nil { return &emptypb.Empty{}, err } @@ -990,9 +990,9 @@ func (a *api) UnregisterActorTimer(ctx context.Context, in *runtimev1pb.Unregist } req := &actors.DeleteTimerRequest{ - Name: in.Name, - ActorID: in.ActorId, - ActorType: in.ActorType, + Name: in.GetName(), + ActorID: in.GetActorId(), + ActorType: in.GetActorType(), } err := a.UniversalAPI.Actors.DeleteTimer(ctx, req) @@ -1005,16 +1005,16 @@ func (a *api) RegisterActorReminder(ctx context.Context, in *runtimev1pb.Registe } req := &actors.CreateReminderRequest{ - Name: in.Name, - ActorID: in.ActorId, - ActorType: in.ActorType, - DueTime: in.DueTime, - Period: in.Period, - TTL: in.Ttl, + Name: in.GetName(), + ActorID: in.GetActorId(), + ActorType: in.GetActorType(), + DueTime: in.GetDueTime(), + Period: in.GetPeriod(), + TTL: in.GetTtl(), } - if in.Data != nil { - j, err := json.Marshal(in.Data) + if in.GetData() != nil { + j, err := json.Marshal(in.GetData()) if err != nil { return &emptypb.Empty{}, err } @@ -1034,9 +1034,9 @@ func (a *api) UnregisterActorReminder(ctx context.Context, in *runtimev1pb.Unreg } req := &actors.DeleteReminderRequest{ - Name: in.Name, - ActorID: in.ActorId, - ActorType: in.ActorType, + Name: in.GetName(), + ActorID: in.GetActorId(), + ActorType: in.GetActorType(), } err := a.UniversalAPI.Actors.DeleteReminder(ctx, req) @@ -1052,9 +1052,9 @@ func (a *api) GetActorState(ctx context.Context, in *runtimev1pb.GetActorStateRe return nil, err } - actorType := in.ActorType - actorID := in.ActorId - key := in.Key + actorType := in.GetActorType() + actorID := in.GetActorId() + key := in.GetKey() hosted := a.UniversalAPI.Actors.IsActorHosted(ctx, &actors.ActorHostedRequest{ ActorType: actorType, @@ -1091,17 +1091,17 @@ func (a *api) ExecuteActorStateTransaction(ctx context.Context, in *runtimev1pb. return nil, err } - actorType := in.ActorType - actorID := in.ActorId + actorType := in.GetActorType() + actorID := in.GetActorId() actorOps := []actors.TransactionalOperation{} - for _, op := range in.Operations { + for _, op := range in.GetOperations() { var actorOp actors.TransactionalOperation - switch op.OperationType { + switch op.GetOperationType() { case string(state.OperationUpsert): setReq := map[string]any{ - "key": op.Key, - "value": op.Value.Value, + "key": op.GetKey(), + "value": op.GetValue().GetValue(), // Actor state do not user other attributes from state request. } if meta := op.GetMetadata(); len(meta) > 0 { @@ -1114,7 +1114,7 @@ func (a *api) ExecuteActorStateTransaction(ctx context.Context, in *runtimev1pb. 
} case string(state.OperationDelete): delReq := map[string]interface{}{ - "key": op.Key, + "key": op.GetKey(), // Actor state do not user other attributes from state request. } @@ -1124,7 +1124,7 @@ func (a *api) ExecuteActorStateTransaction(ctx context.Context, in *runtimev1pb. } default: - err := status.Errorf(codes.Unimplemented, messages.ErrNotSupportedStateOperation, op.OperationType) + err := status.Errorf(codes.Unimplemented, messages.ErrNotSupportedStateOperation, op.GetOperationType()) apiServerLogger.Debug(err) return &emptypb.Empty{}, err } @@ -1166,15 +1166,15 @@ func (a *api) InvokeActor(ctx context.Context, in *runtimev1pb.InvokeActorReques return response, err } - policyDef := a.UniversalAPI.Resiliency.ActorPreLockPolicy(in.ActorType, in.ActorId) + policyDef := a.UniversalAPI.Resiliency.ActorPreLockPolicy(in.GetActorType(), in.GetActorId()) - reqMetadata := make(map[string][]string, len(in.Metadata)) - for k, v := range in.Metadata { + reqMetadata := make(map[string][]string, len(in.GetMetadata())) + for k, v := range in.GetMetadata() { reqMetadata[k] = []string{v} } - req := invokev1.NewInvokeMethodRequest(in.Method). - WithActor(in.ActorType, in.ActorId). - WithRawDataBytes(in.Data). + req := invokev1.NewInvokeMethodRequest(in.GetMethod()). + WithActor(in.GetActorType(), in.GetActorId()). + WithRawDataBytes(in.GetData()). WithMetadata(reqMetadata) if policyDef != nil { req.WithReplay(policyDef.HasRetries()) @@ -1249,30 +1249,30 @@ func (a *api) getConfigurationStore(name string) (configuration.Store, error) { func (a *api) GetConfiguration(ctx context.Context, in *runtimev1pb.GetConfigurationRequest) (*runtimev1pb.GetConfigurationResponse, error) { response := &runtimev1pb.GetConfigurationResponse{} - store, err := a.getConfigurationStore(in.StoreName) + store, err := a.getConfigurationStore(in.GetStoreName()) if err != nil { apiServerLogger.Debug(err) return response, err } req := configuration.GetRequest{ - Keys: in.Keys, - Metadata: in.Metadata, + Keys: in.GetKeys(), + Metadata: in.GetMetadata(), } start := time.Now() policyRunner := resiliency.NewRunner[*configuration.GetResponse](ctx, - a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Configuration), + a.UniversalAPI.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Configuration), ) getResponse, err := policyRunner(func(ctx context.Context) (*configuration.GetResponse, error) { return store.Get(ctx, &req) }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.ConfigurationInvoked(ctx, in.StoreName, diag.Get, err == nil, elapsed) + diag.DefaultComponentMonitoring.ConfigurationInvoked(ctx, in.GetStoreName(), diag.Get, err == nil, elapsed) if err != nil { - err = status.Errorf(codes.Internal, messages.ErrConfigurationGet, req.Keys, in.StoreName, err.Error()) + err = status.Errorf(codes.Internal, messages.ErrConfigurationGet, req.Keys, in.GetStoreName(), err.Error()) apiServerLogger.Debug(err) return response, err } @@ -1342,7 +1342,7 @@ func (h *configurationEventHandler) updateEventHandler(ctx context.Context, e *c } func (a *api) SubscribeConfiguration(request *runtimev1pb.SubscribeConfigurationRequest, stream runtimev1pb.Dapr_SubscribeConfigurationServer) error { //nolint:nosnakecase - store, err := a.getConfigurationStore(request.StoreName) + store, err := a.getConfigurationStore(request.GetStoreName()) if err != nil { apiServerLogger.Debug(err) return err @@ -1351,7 +1351,7 @@ func (a *api) SubscribeConfiguration(request *runtimev1pb.SubscribeConfiguration 
handler := &configurationEventHandler{ readyCh: make(chan struct{}), api: a, - storeName: request.StoreName, + storeName: request.GetStoreName(), serverStream: stream, } // Prevents a leak if we return with an error @@ -1360,7 +1360,7 @@ func (a *api) SubscribeConfiguration(request *runtimev1pb.SubscribeConfiguration // Subscribe subscribeCtx, subscribeCancel := context.WithCancel(stream.Context()) defer subscribeCancel() - slices.Sort(request.Keys) + slices.Sort(request.GetKeys()) subscribeID, err := a.subscribeConfiguration(subscribeCtx, request, handler, store) if err != nil { // Error has already been logged @@ -1394,7 +1394,7 @@ func (a *api) SubscribeConfiguration(request *runtimev1pb.SubscribeConfiguration // Unsubscribe // We must use a background context here because stream.Context is likely canceled already - err = a.unsubscribeConfiguration(context.Background(), subscribeID, request.StoreName, store) + err = a.unsubscribeConfiguration(context.Background(), subscribeID, request.GetStoreName(), store) if err != nil { // Error has already been logged return err @@ -1408,24 +1408,24 @@ func (a *api) SubscribeConfiguration(request *runtimev1pb.SubscribeConfiguration func (a *api) subscribeConfiguration(ctx context.Context, request *runtimev1pb.SubscribeConfigurationRequest, handler *configurationEventHandler, store configuration.Store) (subscribeID string, err error) { componentReq := &configuration.SubscribeRequest{ - Keys: request.Keys, + Keys: request.GetKeys(), Metadata: request.GetMetadata(), } // TODO(@laurence) deal with failed subscription and retires start := time.Now() policyRunner := resiliency.NewRunner[string](ctx, - a.UniversalAPI.Resiliency.ComponentOutboundPolicy(request.StoreName, resiliency.Configuration), + a.UniversalAPI.Resiliency.ComponentOutboundPolicy(request.GetStoreName(), resiliency.Configuration), ) subscribeID, err = policyRunner(func(ctx context.Context) (string, error) { return store.Subscribe(ctx, componentReq, handler.updateEventHandler) }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.ConfigurationInvoked(context.Background(), request.StoreName, diag.ConfigurationSubscribe, err == nil, elapsed) + diag.DefaultComponentMonitoring.ConfigurationInvoked(context.Background(), request.GetStoreName(), diag.ConfigurationSubscribe, err == nil, elapsed) if err != nil { - err = status.Errorf(codes.InvalidArgument, messages.ErrConfigurationSubscribe, componentReq.Keys, request.StoreName, err) + err = status.Errorf(codes.InvalidArgument, messages.ErrConfigurationSubscribe, componentReq.Keys, request.GetStoreName(), err) apiServerLogger.Debug(err) return "", err } diff --git a/pkg/grpc/api_actor_test.go b/pkg/grpc/api_actor_test.go index 000ade6d12e..f1932558f07 100644 --- a/pkg/grpc/api_actor_test.go +++ b/pkg/grpc/api_actor_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/anypb" @@ -180,11 +181,11 @@ func TestGetActorState(t *testing.T) { }) // assert - assert.Nil(t, err) - assert.Equal(t, data, res.Data) + require.NoError(t, err) + assert.Equal(t, data, res.GetData()) assert.Equal(t, map[string]string{ "ttlExpireTime": "2020-10-02T22:00:00Z", - }, res.Metadata) + }, res.GetMetadata()) mockActors.AssertNumberOfCalls(t, "GetState", 1) }) } @@ -279,7 +280,7 @@ func TestExecuteActorStateTransaction(t *testing.T) { }) // assert - assert.NoError(t, err) + 
require.NoError(t, err) assert.NotNil(t, res) mockActors.AssertNumberOfCalls(t, "TransactionalStateOperation", 1) }) @@ -361,7 +362,7 @@ func TestInvokeActorWithResiliency(t *testing.T) { client := runtimev1pb.NewDaprClient(clientConn) _, err := client.InvokeActor(context.Background(), req) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, codes.OK, status.Code(err)) assert.Equal(t, 2, failingActors.Failure.CallCount("failingActor")) }) diff --git a/pkg/grpc/api_crypto.go b/pkg/grpc/api_crypto.go index 5f1220b8ae2..03bd5aaed34 100644 --- a/pkg/grpc/api_crypto.go +++ b/pkg/grpc/api_crypto.go @@ -46,24 +46,24 @@ func (a *api) EncryptAlpha1(stream runtimev1pb.Dapr_EncryptAlpha1Server) (err er } // Validate required options - if reqProto.Options == nil { + if reqProto.GetOptions() == nil { err = messages.ErrBadRequest.WithFormat("first message does not contain the required options") a.Logger.Debug(err) return err } - if reqProto.Options.KeyName == "" { + if reqProto.GetOptions().GetKeyName() == "" { err = messages.ErrBadRequest.WithFormat("missing property 'keyName' in the options message") a.Logger.Debug(err) return err } - if reqProto.Options.KeyWrapAlgorithm == "" { + if reqProto.GetOptions().GetKeyWrapAlgorithm() == "" { err = messages.ErrBadRequest.WithFormat("missing property 'keyWrapAlgorithm' in the options message") a.Logger.Debug(err) return err } // Validate the request and get the component - component, err := a.CryptoValidateRequest(reqProto.Options.ComponentName) + component, err := a.CryptoValidateRequest(reqProto.GetOptions().GetComponentName()) if err != nil { // Error is already logged return err @@ -71,18 +71,18 @@ func (a *api) EncryptAlpha1(stream runtimev1pb.Dapr_EncryptAlpha1Server) (err er // Options encOpts := encv1.EncryptOptions{ - KeyName: reqProto.Options.KeyName, - Algorithm: encv1.KeyAlgorithm(strings.ToUpper(reqProto.Options.KeyWrapAlgorithm)), - WrapKeyFn: a.CryptoGetWrapKeyFn(stream.Context(), reqProto.Options.ComponentName, component), + KeyName: reqProto.GetOptions().GetKeyName(), + Algorithm: encv1.KeyAlgorithm(strings.ToUpper(reqProto.GetOptions().GetKeyWrapAlgorithm())), + WrapKeyFn: a.CryptoGetWrapKeyFn(stream.Context(), reqProto.GetOptions().GetComponentName(), component), // The next values are optional and could be empty - OmitKeyName: reqProto.Options.OmitDecryptionKeyName, - DecryptionKeyName: reqProto.Options.DecryptionKeyName, + OmitKeyName: reqProto.GetOptions().GetOmitDecryptionKeyName(), + DecryptionKeyName: reqProto.GetOptions().GetDecryptionKeyName(), } // Set the cipher if present - if reqProto.Options.DataEncryptionCipher != "" { - encOpts.Cipher = ptr.Of(encv1.Cipher(strings.ToUpper(reqProto.Options.DataEncryptionCipher))) + if reqProto.GetOptions().GetDataEncryptionCipher() != "" { + encOpts.Cipher = ptr.Of(encv1.Cipher(strings.ToUpper(reqProto.GetOptions().GetDataEncryptionCipher()))) } // Process the request as a stream @@ -101,14 +101,14 @@ func (a *api) DecryptAlpha1(stream runtimev1pb.Dapr_DecryptAlpha1Server) (err er } // Validate required options - if reqProto.Options == nil { + if reqProto.GetOptions() == nil { err = messages.ErrBadRequest.WithFormat("first message does not contain the required options") a.Logger.Debug(err) return err } // Validate the request and get the component - component, err := a.CryptoValidateRequest(reqProto.Options.ComponentName) + component, err := a.CryptoValidateRequest(reqProto.GetOptions().GetComponentName()) if err != nil { // Error is already logged return err @@ -116,10 +116,10 
@@ func (a *api) DecryptAlpha1(stream runtimev1pb.Dapr_DecryptAlpha1Server) (err er // Options decOpts := encv1.DecryptOptions{ - UnwrapKeyFn: a.CryptoGetUnwrapKeyFn(stream.Context(), reqProto.Options.ComponentName, component), + UnwrapKeyFn: a.CryptoGetUnwrapKeyFn(stream.Context(), reqProto.GetOptions().GetComponentName(), component), // The next values are optional and could be empty - KeyName: reqProto.Options.KeyName, + KeyName: reqProto.GetOptions().GetKeyName(), } // Process the request as a stream diff --git a/pkg/grpc/api_crypto_test.go b/pkg/grpc/api_crypto_test.go index 1fa89b09491..7b51f19d1df 100644 --- a/pkg/grpc/api_crypto_test.go +++ b/pkg/grpc/api_crypto_test.go @@ -566,13 +566,13 @@ func cryptoSendRequest(stream grpc.ClientStream, send []runtimev1pb.CryptoReques payload := recv.GetPayload() if payload != nil { - if payload.Seq != seq { - return nil, fmt.Errorf("expected sequence %d but got %d", seq, payload.Seq) + if payload.GetSeq() != seq { + return nil, fmt.Errorf("expected sequence %d but got %d", seq, payload.GetSeq()) } seq++ - if len(payload.Data) > 0 { - _, err = res.Write(payload.Data) + if len(payload.GetData()) > 0 { + _, err = res.Write(payload.GetData()) if err != nil { return nil, fmt.Errorf("failed to write data into buffer: %w", err) } diff --git a/pkg/grpc/api_daprinternal.go b/pkg/grpc/api_daprinternal.go index 3dbd565e3e7..dbbb87bee41 100644 --- a/pkg/grpc/api_daprinternal.go +++ b/pkg/grpc/api_daprinternal.go @@ -68,7 +68,7 @@ func (a *api) CallLocal(ctx context.Context, in *internalv1pb.InternalInvokeRequ statusCode = int32(codes.Internal) return nil, status.Errorf(codes.Internal, messages.ErrChannelInvoke, err) } else { - statusCode = res.Status().Code + statusCode = res.Status().GetCode() } defer res.Close() @@ -90,30 +90,30 @@ func (a *api) CallLocalStream(stream internalv1pb.ServiceInvocation_CallLocalStr if err != nil { return err } - if chunk.Request == nil || chunk.Request.Metadata == nil || chunk.Request.Message == nil { + if chunk.GetRequest() == nil || chunk.GetRequest().GetMetadata() == nil || chunk.GetRequest().GetMessage() == nil { return status.Errorf(codes.InvalidArgument, messages.ErrInternalInvokeRequest, "request does not contain the required fields in the leading chunk") } // Append the invoked method to the context's metadata so we can use it for tracing if md, ok := metadata.FromIncomingContext(stream.Context()); ok { - md[diagConsts.DaprCallLocalStreamMethodKey] = []string{chunk.Request.Message.Method} + md[diagConsts.DaprCallLocalStreamMethodKey] = []string{chunk.GetRequest().GetMessage().GetMethod()} } // Create the request object // The "rawData" of the object will be a pipe to which content is added chunk-by-chunk pr, pw := io.Pipe() - req, err := invokev1.InternalInvokeRequest(chunk.Request) + req, err := invokev1.InternalInvokeRequest(chunk.GetRequest()) if err != nil { return status.Errorf(codes.InvalidArgument, messages.ErrInternalInvokeRequest, err.Error()) } req.WithRawData(pr). 
- WithContentType(chunk.Request.Message.ContentType) + WithContentType(chunk.GetRequest().GetMessage().GetContentType()) defer req.Close() // If the data has a type_url, set that in the object too // This is necessary to support the gRPC->gRPC service invocation (legacy, non-proxy) path correctly // (Note that GetTypeUrl could return an empty value, so this call becomes a no-op) - req.WithDataTypeURL(chunk.Request.Message.GetData().GetTypeUrl()) + req.WithDataTypeURL(chunk.GetRequest().GetMessage().GetData().GetTypeUrl()) ctx, cancel := context.WithCancel(stream.Context()) defer cancel() @@ -186,7 +186,7 @@ func (a *api) CallLocalStream(stream internalv1pb.ServiceInvocation_CallLocalStr return } - if chunk.Request != nil && (chunk.Request.Metadata != nil || chunk.Request.Message != nil) { + if chunk.GetRequest() != nil && (chunk.GetRequest().GetMetadata() != nil || chunk.GetRequest().GetMessage() != nil) { pw.CloseWithError(errors.New("request metadata found in non-leading chunk")) return } @@ -202,7 +202,7 @@ func (a *api) CallLocalStream(stream internalv1pb.ServiceInvocation_CallLocalStr return status.Errorf(codes.Internal, messages.ErrChannelInvoke, err) } defer res.Close() - statusCode = res.Status().Code + statusCode = res.Status().GetCode() // Respond to the caller buf := invokev1.BufPool.Get().(*[]byte) @@ -262,7 +262,7 @@ func (a *api) CallLocalStream(stream internalv1pb.ServiceInvocation_CallLocalStr } // Send the chunk if there's anything to send - if proto.Response != nil || proto.Payload != nil { + if proto.GetResponse() != nil || proto.GetPayload() != nil { err = stream.SendMsg(proto) if err != nil { return fmt.Errorf("error sending message: %w", err) @@ -315,7 +315,7 @@ func (a *api) CallActor(ctx context.Context, in *internalv1pb.InternalInvokeRequ func (a *api) callLocalValidateACL(ctx context.Context, req *invokev1.InvokeMethodRequest) error { if a.accessControlList != nil { // An access control policy has been specified for the app. Apply the policies. 
- operation := req.Message().Method + operation := req.Message().GetMethod() var httpVerb commonv1pb.HTTPExtension_Verb //nolint:nosnakecase // Get the HTTP verb in case the application protocol is "http" appProtocolIsHTTP := a.UniversalAPI.AppConnectionConfig.Protocol.IsHTTP() @@ -345,9 +345,9 @@ func (a *api) callLocalValidateACL(ctx context.Context, req *invokev1.InvokeMeth // }() // ``` func (a *api) callLocalRecordRequest(req *internalv1pb.InternalInvokeRequest) (callerAppID string) { - callerIDHeader, ok := req.Metadata[invokev1.CallerIDHeader] - if ok && len(callerIDHeader.Values) > 0 { - callerAppID = callerIDHeader.Values[0] + callerIDHeader, ok := req.GetMetadata()[invokev1.CallerIDHeader] + if ok && len(callerIDHeader.GetValues()) > 0 { + callerAppID = callerIDHeader.GetValues()[0] } else { callerAppID = "unknown" } diff --git a/pkg/grpc/api_daprinternal_test.go b/pkg/grpc/api_daprinternal_test.go index bd88c7eac4d..9914c141e61 100644 --- a/pkg/grpc/api_daprinternal_test.go +++ b/pkg/grpc/api_daprinternal_test.go @@ -192,12 +192,12 @@ func TestCallLocalStream(t *testing.T) { pd, err := request.ProtoWithData() require.NoError(t, err) - require.NotNil(t, pd.Message.Data) + require.NotNil(t, pd.GetMessage().GetData()) err = st.Send(&internalv1pb.InternalInvokeRequestStream{ Request: request.Proto(), Payload: &commonv1pb.StreamPayload{ - Data: pd.Message.Data.Value, + Data: pd.GetMessage().GetData().GetValue(), Seq: 0, }, }) @@ -222,7 +222,7 @@ func TestCallRemoteAppWithTracing(t *testing.T) { defer request.Close() resp, err := client.CallLocal(context.Background(), request.Proto()) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, resp.GetMessage(), "failed to generate trace context with app call") } @@ -240,6 +240,6 @@ func TestCallActorWithTracing(t *testing.T) { defer request.Close() resp, err := client.CallActor(context.Background(), request.Proto()) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, resp.GetMessage(), "failed to generate trace context with actor call") } diff --git a/pkg/grpc/api_test.go b/pkg/grpc/api_test.go index 24f764b866f..91e6d7bf1a6 100644 --- a/pkg/grpc/api_test.go +++ b/pkg/grpc/api_test.go @@ -171,8 +171,8 @@ func (m *mockGRPCAPI) CallLocalStream(stream internalv1pb.ServiceInvocation_Call if err != nil { return err } - if pd.Message != nil && pd.Message.Data != nil { - data = pd.Message.Data.Value + if pd.GetMessage() != nil && pd.GetMessage().GetData() != nil { + data = pd.GetMessage().GetData().GetValue() } stream.Send(&internalv1pb.InternalInvokeResponseStream{ @@ -420,9 +420,9 @@ func TestAPIToken(t *testing.T) { assert.Equal(t, "Not Found", s.Message()) errInfo := s.Details()[0].(*epb.ErrorInfo) - assert.Equal(t, 1, len(s.Details())) - assert.Equal(t, "404", errInfo.Metadata["http.code"]) - assert.Equal(t, "fakeDirectMessageResponse", errInfo.Metadata["http.error_message"]) + assert.Len(t, s.Details(), 1) + assert.Equal(t, "404", errInfo.GetMetadata()["http.code"]) + assert.Equal(t, "fakeDirectMessageResponse", errInfo.GetMetadata()["http.error_message"]) }) t.Run("stream", func(t *testing.T) { @@ -438,8 +438,8 @@ func TestAPIToken(t *testing.T) { // The request was invalid so we should get an error about the actor runtime (which means we passed the auth middleware and are hitting the API as expected) var m any err = stream.RecvMsg(&m) - assert.Error(t, err) - assert.ErrorContains(t, err, "actor runtime") + require.Error(t, err) + require.ErrorContains(t, err, "actor runtime") }) }) @@ -501,7 +501,7 @@ 
func TestAPIToken(t *testing.T) { // We should get an Unauthenticated error var m any err = stream.RecvMsg(&m) - assert.Error(t, err) + require.Error(t, err) s, ok := status.FromError(err) assert.True(t, ok) assert.Equal(t, codes.Unauthenticated, s.Code()) @@ -565,7 +565,7 @@ func TestAPIToken(t *testing.T) { // We should get an Unauthenticated error var m any err = stream.RecvMsg(&m) - assert.Error(t, err) + require.Error(t, err) s, ok := status.FromError(err) assert.True(t, ok) assert.Equal(t, codes.Unauthenticated, s.Code()) @@ -672,9 +672,9 @@ func TestInvokeServiceFromHTTPResponse(t *testing.T) { if tt.errHTTPCode != "" { errInfo := s.Details()[0].(*epb.ErrorInfo) - assert.Equal(t, 1, len(s.Details())) - assert.Equal(t, tt.errHTTPCode, errInfo.Metadata["http.code"]) - assert.Equal(t, tt.errHTTPMessage, errInfo.Metadata["http.error_message"]) + assert.Len(t, s.Details(), 1) + assert.Equal(t, tt.errHTTPCode, errInfo.GetMetadata()["http.code"]) + assert.Equal(t, tt.errHTTPMessage, errInfo.GetMetadata()["http.error_message"]) } }) } @@ -741,7 +741,7 @@ func TestInvokeServiceFromGRPCResponse(t *testing.T) { assert.Equal(t, "Unimplemented", s.Message()) errInfo := s.Details()[0].(*epb.ResourceInfo) - assert.Equal(t, 1, len(s.Details())) + assert.Len(t, s.Details(), 1) assert.Equal(t, "sidecar", errInfo.GetResourceType()) assert.Equal(t, "invoke/service", errInfo.GetResourceName()) assert.Equal(t, "Dapr", errInfo.GetOwner()) @@ -903,10 +903,10 @@ func TestGetSecret(t *testing.T) { resp, err := client.GetSecret(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err) - assert.Equal(t, resp.Data[tt.key], tt.expectedResponse, "Expected responses to be same") + require.NoError(t, err) + assert.Equal(t, tt.expectedResponse, resp.GetData()[tt.key], "Expected responses to be same") } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -979,10 +979,10 @@ func TestGetBulkSecret(t *testing.T) { resp, err := client.GetBulkSecret(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err) - assert.Equal(t, resp.Data[tt.key].Secrets[tt.key], tt.expectedResponse, "Expected responses to be same") + require.NoError(t, err) + assert.Equal(t, tt.expectedResponse, resp.GetData()[tt.key].GetSecrets()[tt.key], "Expected responses to be same") } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -1295,11 +1295,11 @@ func TestGetState(t *testing.T) { resp, err := client.GetState(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err) - assert.Equal(t, resp.Data, tt.expectedResponse.Data, "Expected response Data to be same") - assert.Equal(t, resp.Etag, tt.expectedResponse.Etag, "Expected response Etag to be same") + require.NoError(t, err) + assert.Equal(t, resp.GetData(), tt.expectedResponse.GetData(), "Expected response Data to be same") + assert.Equal(t, resp.GetEtag(), tt.expectedResponse.GetEtag(), "Expected response Etag to be same") } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -1433,10 +1433,10 @@ func TestGetConfiguration(t *testing.T) { resp, err := client.GetConfigurationAlpha1(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err) - assert.Equal(t, resp.Items, tt.expectedResponse.Items, "Expected response items to be same") + 
require.NoError(t, err) + assert.Equal(t, resp.GetItems(), tt.expectedResponse.GetItems(), "Expected response items to be same") } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -1449,10 +1449,10 @@ func TestGetConfiguration(t *testing.T) { resp, err := client.GetConfiguration(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err) - assert.Equal(t, resp.Items, tt.expectedResponse.Items, "Expected response items to be same") + require.NoError(t, err) + assert.Equal(t, resp.GetItems(), tt.expectedResponse.GetItems(), "Expected response items to be same") } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -1613,12 +1613,12 @@ func TestSubscribeConfiguration(t *testing.T) { rsp, err := resp.Recv() require.NoError(t, err) require.NotNil(t, rsp) - require.NotEmpty(t, rsp.Id) - require.Empty(t, rsp.Items) + require.NotEmpty(t, rsp.GetId()) + require.Empty(t, rsp.GetItems()) rsp, err = resp.Recv() require.NoError(t, err) - assert.Equal(t, tt.expectedResponse, rsp.Items, "Expected response items to be same") + assert.Equal(t, tt.expectedResponse, rsp.GetItems(), "Expected response items to be same") } else { const retry = 3 count := 0 @@ -1635,7 +1635,7 @@ func TestSubscribeConfiguration(t *testing.T) { _, err = resp.Recv() } assert.Equal(t, tt.expectedError, status.Code(err)) - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") } } } @@ -1816,12 +1816,12 @@ func TestUnSubscribeConfiguration(t *testing.T) { rsp, recvErr := resp.Recv() require.NoError(t, recvErr) require.NotNil(t, rsp) - if rsp.Items != nil { - assert.Equal(t, tt.expectedResponse, rsp.Items) + if rsp.GetItems() != nil { + assert.Equal(t, tt.expectedResponse, rsp.GetItems()) } else { - assert.Equal(t, mockSubscribeID, rsp.Id) + assert.Equal(t, mockSubscribeID, rsp.GetId()) } - subscribeID = rsp.Id + subscribeID = rsp.GetId() } require.NoError(t, err, "Error should be nil") _, err = client.UnsubscribeConfigurationAlpha1(context.Background(), &runtimev1pb.UnsubscribeConfigurationRequest{ @@ -1864,13 +1864,13 @@ func TestUnSubscribeConfiguration(t *testing.T) { time.Sleep(time.Millisecond * 10) rsp, recvErr := resp.Recv() assert.NotNil(t, rsp) - assert.NoError(t, recvErr) - if rsp.Items != nil { - assert.Equal(t, tt.expectedResponse, rsp.Items) + require.NoError(t, recvErr) + if rsp.GetItems() != nil { + assert.Equal(t, tt.expectedResponse, rsp.GetItems()) } else { - assert.Equal(t, mockSubscribeID, rsp.Id) + assert.Equal(t, mockSubscribeID, rsp.GetId()) } - subscribeID = rsp.Id + subscribeID = rsp.GetId() } require.NoError(t, err, "Error should be nil") _, err = client.UnsubscribeConfiguration(context.Background(), &runtimev1pb.UnsubscribeConfigurationRequest{ @@ -2070,22 +2070,22 @@ func TestGetBulkState(t *testing.T) { resp, err := client.GetBulkState(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err) + require.NoError(t, err) if len(tt.expectedResponse) == 0 { - assert.Equal(t, len(resp.Items), 0, "Expected response to be empty") + assert.Empty(t, resp.GetItems(), "Expected response to be empty") } else { - for i := 0; i < len(resp.Items); i++ { - if tt.expectedResponse[i].Error == "" { - assert.Equal(t, resp.Items[i].Data, tt.expectedResponse[i].Data, "Expected response Data to be same") - assert.Equal(t, resp.Items[i].Etag, tt.expectedResponse[i].Etag, 
"Expected response Etag to be same") + for i := 0; i < len(resp.GetItems()); i++ { + if tt.expectedResponse[i].GetError() == "" { + assert.Equal(t, resp.GetItems()[i].GetData(), tt.expectedResponse[i].GetData(), "Expected response Data to be same") + assert.Equal(t, resp.GetItems()[i].GetEtag(), tt.expectedResponse[i].GetEtag(), "Expected response Etag to be same") } else { - assert.Equal(t, resp.Items[i].Error, tt.expectedResponse[i].Error, "Expected response error to be same") + assert.Equal(t, resp.GetItems()[i].GetError(), tt.expectedResponse[i].GetError(), "Expected response error to be same") } } } } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -2391,7 +2391,7 @@ func TestPublishTopic(t *testing.T) { PubsubName: "pubsub", Topic: "topic", }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("no err: publish event request with topic, pubsub and ce metadata override", func(t *testing.T) { @@ -2404,7 +2404,7 @@ func TestPublishTopic(t *testing.T) { "cloudevent.pubsub": "overridepubsub", // noop -- if this modified the envelope the test would fail }, }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("err: publish event request with error-topic and pubsub", func(t *testing.T) { @@ -2455,7 +2455,7 @@ func TestPublishTopic(t *testing.T) { }, }, }) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, codes.InvalidArgument, status.Code(err)) assert.Contains(t, err.Error(), "entryId is duplicated") }) @@ -2478,7 +2478,7 @@ func TestPublishTopic(t *testing.T) { }, }, }) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, codes.InvalidArgument, status.Code(err)) assert.Contains(t, err.Error(), "not present for entry") }) @@ -2494,7 +2494,7 @@ func TestPublishTopic(t *testing.T) { PubsubName: "pubsub", Topic: "topic", }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("err: bulk publish event request with error-topic and pubsub", func(t *testing.T) { @@ -2582,8 +2582,8 @@ func TestBulkPublish(t *testing.T) { Topic: "topic", Entries: sampleEntries, }) - assert.NoError(t, err) - assert.Empty(t, res.FailedEntries) + require.NoError(t, err) + assert.Empty(t, res.GetFailedEntries()) }) t.Run("no failures with ce metadata override", func(t *testing.T) { @@ -2597,8 +2597,8 @@ func TestBulkPublish(t *testing.T) { "cloudevent.pubsub": "overridepubsub", // noop -- if this modified the envelope the test would fail }, }) - assert.NoError(t, err) - assert.Empty(t, res.FailedEntries) + require.NoError(t, err) + assert.Empty(t, res.GetFailedEntries()) }) t.Run("all failures from component", func(t *testing.T) { @@ -2609,9 +2609,9 @@ func TestBulkPublish(t *testing.T) { }) t.Log(res) // Full failure from component, so expecting no error - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, res) - assert.Equal(t, 4, len(res.FailedEntries)) + assert.Len(t, res.GetFailedEntries(), 4) }) t.Run("partial failures from component", func(t *testing.T) { @@ -2621,9 +2621,9 @@ func TestBulkPublish(t *testing.T) { Entries: sampleEntries, }) // Partial failure, so expecting no error - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, res) - assert.Equal(t, 2, len(res.FailedEntries)) + assert.Len(t, res.GetFailedEntries(), 2) }) } @@ -2644,18 +2644,18 @@ func TestInvokeBinding(t *testing.T) { client := runtimev1pb.NewDaprClient(clientConn) _, err := client.InvokeBinding(context.Background(), &runtimev1pb.InvokeBindingRequest{}) - 
assert.NoError(t, err) + require.NoError(t, err) _, err = client.InvokeBinding(context.Background(), &runtimev1pb.InvokeBindingRequest{Name: "error-binding"}) assert.Equal(t, codes.Internal, status.Code(err)) ctx := grpcMetadata.AppendToOutgoingContext(context.Background(), "traceparent", "Test") resp, err := client.InvokeBinding(ctx, &runtimev1pb.InvokeBindingRequest{Metadata: map[string]string{"userMetadata": "val1"}}) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, resp) - assert.Contains(t, resp.Metadata, "traceparent") - assert.Equal(t, resp.Metadata["traceparent"], "Test") - assert.Contains(t, resp.Metadata, "userMetadata") - assert.Equal(t, resp.Metadata["userMetadata"], "val1") + assert.Contains(t, resp.GetMetadata(), "traceparent") + assert.Equal(t, "Test", resp.GetMetadata()["traceparent"]) + assert.Contains(t, resp.GetMetadata(), "userMetadata") + assert.Equal(t, "val1", resp.GetMetadata()["userMetadata"]) } func TestTransactionStateStoreNotConfigured(t *testing.T) { @@ -2831,9 +2831,9 @@ func TestExecuteStateTransaction(t *testing.T) { _, err := client.ExecuteStateTransaction(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err) + require.NoError(t, err) } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -3021,17 +3021,17 @@ func TestQueryState(t *testing.T) { StoreName: "store1", Query: queryTestRequestOK, }) - assert.Equal(t, 1, len(resp.Results)) + assert.Len(t, resp.GetResults(), 1) assert.Equal(t, codes.OK, status.Code(err)) - if len(resp.Results) > 0 { - assert.NotNil(t, resp.Results[0].Data) + if len(resp.GetResults()) > 0 { + assert.NotNil(t, resp.GetResults()[0].GetData()) } resp, err = client.QueryStateAlpha1(context.Background(), &runtimev1pb.QueryStateRequest{ StoreName: "store1", Query: queryTestRequestNoRes, }) - assert.Equal(t, 0, len(resp.Results)) + assert.Empty(t, resp.GetResults()) assert.Equal(t, codes.OK, status.Code(err)) _, err = client.QueryStateAlpha1(context.Background(), &runtimev1pb.QueryStateRequest{ @@ -3130,9 +3130,9 @@ func TestGetConfigurationAPI(t *testing.T) { }) require.NoError(t, err) - require.NotNil(t, r.Items) - assert.Len(t, r.Items, 1) - assert.Equal(t, "val1", r.Items["key1"].Value) + require.NotNil(t, r.GetItems()) + assert.Len(t, r.GetItems(), 1) + assert.Equal(t, "val1", r.GetItems()["key1"].GetValue()) } } @@ -3177,15 +3177,15 @@ func TestSubscribeConfigurationAPI(t *testing.T) { break } - if update != nil && len(update.Items) > 0 { + if update != nil && len(update.GetItems()) > 0 { r = update break } } require.NotNil(t, r) - assert.Len(t, r.Items, 1) - assert.Equal(t, "val1", r.Items["key1"].Value) + assert.Len(t, r.GetItems(), 1) + assert.Equal(t, "val1", r.GetItems()["key1"].GetValue()) } } @@ -3219,16 +3219,16 @@ func TestSubscribeConfigurationAPI(t *testing.T) { break } - if update != nil && len(update.Items) > 0 { + if update != nil && len(update.GetItems()) > 0 { r = update break } } require.NotNil(t, r) - assert.Len(t, r.Items, 2) - assert.Equal(t, "val1", r.Items["key1"].Value) - assert.Equal(t, "val2", r.Items["key2"].Value) + assert.Len(t, r.GetItems(), 2) + assert.Equal(t, "val1", r.GetItems()["key1"].GetValue()) + assert.Equal(t, "val2", r.GetItems()["key2"].GetValue()) } } @@ -3303,7 +3303,7 @@ func TestStateAPIWithResiliency(t *testing.T) { StoreName: "failStore", Key: "failingGetKey", }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, 
failingStore.Failure.CallCount("failingGetKey")) }) @@ -3315,7 +3315,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeoutGetKey")) assert.Less(t, end.Sub(start), time.Second*30) }) @@ -3330,7 +3330,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("failingSetKey")) }) @@ -3347,7 +3347,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeoutSetKey")) assert.Less(t, end.Sub(start), time.Second*30) }) @@ -3357,7 +3357,7 @@ func TestStateAPIWithResiliency(t *testing.T) { StoreName: "failStore", Key: "failingDeleteKey", }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("failingDeleteKey")) }) @@ -3369,7 +3369,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeoutDeleteKey")) assert.Less(t, end.Sub(start), time.Second*30) }) @@ -3386,7 +3386,7 @@ func TestStateAPIWithResiliency(t *testing.T) { Keys: []string{"failingBulkGetKey", "goodBulkGetKey"}, }) - assert.Error(t, err) + require.Error(t, err) }) t.Run("bulk state set recovers from single key failure with resiliency", func(t *testing.T) { @@ -3404,7 +3404,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("failingBulkSetKey")) assert.Equal(t, 1, failingStore.Failure.CallCount("goodBulkSetKey")) }) @@ -3426,7 +3426,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeoutBulkSetKey")) assert.Equal(t, 0, failingStore.Failure.CallCount("goodTimeoutBulkSetKey")) assert.Less(t, end.Sub(start), time.Second*30) @@ -3445,7 +3445,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("failingMultiKey")) }) @@ -3462,7 +3462,7 @@ func TestStateAPIWithResiliency(t *testing.T) { }, }) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeoutMultiKey")) }) @@ -3473,7 +3473,7 @@ func TestStateAPIWithResiliency(t *testing.T) { Metadata: map[string]string{"key": "failingQueryKey"}, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("failingQueryKey")) }) @@ -3484,7 +3484,7 @@ func TestStateAPIWithResiliency(t *testing.T) { Metadata: map[string]string{"key": "timeoutQueryKey"}, }) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeoutQueryKey")) }) } @@ -3532,7 +3532,7 @@ func TestConfigurationAPIWithResiliency(t *testing.T) { Metadata: map[string]string{"key": "failingGetKey"}, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingConfigStore.Failure.CallCount("failingGetKey")) }) @@ -3543,7 +3543,7 @@ func TestConfigurationAPIWithResiliency(t *testing.T) { Metadata: map[string]string{"key": "timeoutGetKey"}, }) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, 
failingConfigStore.Failure.CallCount("timeoutGetKey")) }) @@ -3553,10 +3553,10 @@ func TestConfigurationAPIWithResiliency(t *testing.T) { Keys: []string{}, Metadata: map[string]string{"key": "failingSubscribeKey"}, }) - assert.NoError(t, err) + require.NoError(t, err) _, err = resp.Recv() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingConfigStore.Failure.CallCount("failingSubscribeKey")) }) @@ -3567,10 +3567,10 @@ func TestConfigurationAPIWithResiliency(t *testing.T) { Keys: []string{}, Metadata: map[string]string{"key": "timeoutSubscribeKey"}, }) - assert.NoError(t, err) + require.NoError(t, err) _, err = resp.Recv() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingConfigStore.Failure.CallCount("timeoutSubscribeKey")) }) } @@ -3613,7 +3613,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { Key: "key", }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("key")) }) @@ -3626,7 +3626,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeout")) assert.Less(t, end.Sub(start), time.Second*30) }) @@ -3637,7 +3637,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { Metadata: map[string]string{"key": "bulk"}, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("bulk")) }) @@ -3649,7 +3649,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("bulkTimeout")) assert.Less(t, end.Sub(start), time.Second*30) }) @@ -3703,8 +3703,8 @@ func TestServiceInvocationWithResiliency(t *testing.T) { require.NoError(t, err) assert.Equal(t, 2, failingDirectMessaging.Failure.CallCount("failingKey")) require.NotNil(t, res) - require.NotNil(t, res.Data) - assert.Equal(t, val, res.Data.Value) + require.NotNil(t, res.GetData()) + assert.Equal(t, val, res.GetData().GetValue()) }) t.Run("Test invoke direct message fails with timeout", func(t *testing.T) { @@ -3718,7 +3718,7 @@ func TestServiceInvocationWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingDirectMessaging.Failure.CallCount("timeoutKey")) assert.Less(t, end.Sub(start), time.Second*30) }) @@ -3732,7 +3732,7 @@ func TestServiceInvocationWithResiliency(t *testing.T) { }, }) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingDirectMessaging.Failure.CallCount("extraFailingKey")) }) @@ -3745,7 +3745,7 @@ func TestServiceInvocationWithResiliency(t *testing.T) { Data: &anypb.Any{Value: []byte("circuitBreakerKey")}, }, }) - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 5, failingDirectMessaging.Failure.CallCount("circuitBreakerKey")) // Additional requests should fail due to the circuit breaker. 
@@ -3756,7 +3756,7 @@ func TestServiceInvocationWithResiliency(t *testing.T) { Data: &anypb.Any{Value: []byte("circuitBreakerKey")}, }, }) - assert.Error(t, err) + require.Error(t, err) assert.Contains(t, err.Error(), "circuit breaker is open") assert.Equal(t, 5, failingDirectMessaging.Failure.CallCount("circuitBreakerKey")) }) @@ -3965,8 +3965,8 @@ func TestTryLock(t *testing.T) { ExpiryInSeconds: 1, } resp, err := api.TryLockAlpha1(context.Background(), req) - assert.NoError(t, err) - assert.Equal(t, true, resp.Success) + require.NoError(t, err) + assert.True(t, resp.GetSuccess()) }) } @@ -4084,8 +4084,8 @@ func TestUnlock(t *testing.T) { LockOwner: "owner", } resp, err := api.UnlockAlpha1(context.Background(), req) - assert.NoError(t, err) - assert.Equal(t, runtimev1pb.UnlockResponse_SUCCESS, resp.Status) //nolint:nosnakecase + require.NoError(t, err) + assert.Equal(t, runtimev1pb.UnlockResponse_SUCCESS, resp.GetStatus()) //nolint:nosnakecase }) } @@ -4209,17 +4209,17 @@ func TestMetadata(t *testing.T) { Key: "foo", Value: "bar", }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("Get Metadata", func(t *testing.T) { res, err := client.GetMetadata(context.Background(), &runtimev1pb.GetMetadataRequest{}) - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, "fakeAPI", res.Id) + assert.Equal(t, "fakeAPI", res.GetId()) bytes, err := json.Marshal(res) - assert.NoError(t, err) + require.NoError(t, err) expectedResponse := `{"id":"fakeAPI","active_actors_count":[{"type":"abcd","count":10},{"type":"xyz","count":5}],"registered_components":[{"name":"MockComponent1Name","type":"mock.component1Type","version":"v1.0","capabilities":["mock.feat.MockComponent1Name"]},{"name":"MockComponent2Name","type":"mock.component2Type","version":"v1.0","capabilities":["mock.feat.MockComponent2Name"]}],"extended_metadata":{"daprRuntimeVersion":"edge","foo":"bar","test":"value"},"subscriptions":[{"pubsub_name":"test","topic":"topic","rules":{"rules":[{"path":"path"}]},"dead_letter_topic":"dead"}],"http_endpoints":[{"name":"MockHTTPEndpoint"}],"app_connection_properties":{"port":5000,"protocol":"grpc","channel_address":"1.2.3.4","max_concurrency":10,"health":{"health_probe_interval":"10s","health_probe_timeout":"5s","health_threshold":3}},"runtime_version":"edge","actor_runtime":{"runtime_status":2,"active_actors":[{"type":"abcd","count":10},{"type":"xyz","count":5}],"host_ready":true}}` assert.Equal(t, expectedResponse, string(bytes)) diff --git a/pkg/grpc/endpoints_test.go b/pkg/grpc/endpoints_test.go index cab6546301e..97cb602d83c 100644 --- a/pkg/grpc/endpoints_test.go +++ b/pkg/grpc/endpoints_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "github.com/dapr/dapr/pkg/config" @@ -64,20 +65,20 @@ func testMiddleware(u grpc.UnaryServerInterceptor, s grpc.StreamServerIntercepto FullMethod: method, }, hUnary) if expectErr { - assert.Error(t, err) - assert.ErrorContains(t, err, "Unimplemented") + require.Error(t, err) + require.ErrorContains(t, err, "Unimplemented") } else { - assert.NoError(t, err) + require.NoError(t, err) } err = s(nil, nil, &grpc.StreamServerInfo{ FullMethod: method, }, hStream) if expectErr { - assert.Error(t, err) - assert.ErrorContains(t, err, "Unimplemented") + require.Error(t, err) + require.ErrorContains(t, err, "Unimplemented") } else { - assert.NoError(t, err) + require.NoError(t, err) } } } diff --git a/pkg/grpc/manager/pool_test.go b/pkg/grpc/manager/pool_test.go 
index d4d67a1c464..7e72d96a3e0 100644 --- a/pkg/grpc/manager/pool_test.go +++ b/pkg/grpc/manager/pool_test.go @@ -152,7 +152,7 @@ func TestConnectionPool(t *testing.T) { // Destroy all connections cp.DestroyAll() - require.Len(t, cp.connections, 0) + require.Empty(t, cp.connections) require.True(t, conns[0].Closed) require.True(t, conns[1].Closed) }) @@ -236,7 +236,7 @@ func TestConnectionPool(t *testing.T) { // Destroy all connections cp.DestroyAll() - require.Len(t, cp.connections, 0) + require.Empty(t, cp.connections) require.True(t, conns[0].Closed) require.True(t, conns[1].Closed) }) @@ -244,7 +244,7 @@ func TestConnectionPool(t *testing.T) { testExpiredConn := func(minActiveConns int) func(t *testing.T) { // Reset the object cp.DestroyAll() - require.Len(t, cp.connections, 0) + require.Empty(t, cp.connections) conns[0].Closed = false conns[1].Closed = false cp.Register(conns[0]) @@ -338,7 +338,7 @@ func TestConnectionPool(t *testing.T) { cp.Purge() if minActiveConns == 0 { - require.Len(t, cp.connections, 0) + require.Empty(t, cp.connections) } else { require.Len(t, cp.connections, 1) require.Equal(t, conns[0], cp.connections[0].conn) diff --git a/pkg/grpc/proxy/codec/codec_test.go b/pkg/grpc/proxy/codec/codec_test.go index b88af6d7c30..47fe0a6b1d9 100644 --- a/pkg/grpc/proxy/codec/codec_test.go +++ b/pkg/grpc/proxy/codec/codec_test.go @@ -64,7 +64,7 @@ func TestProtoCodec_ReadYourWrites(t *testing.T) { require.NoError(t, err, "unmarshalling must go ok") require.Equal(t, p1.ProtoReflect(), p2.ProtoReflect()) - require.Equal(t, p1.Value, p2.Value) + require.Equal(t, p1.GetValue(), p2.GetValue()) } func TestProtoCodec_ReadYourWrites_v1(t *testing.T) { @@ -85,7 +85,7 @@ func TestProtoCodec_ReadYourWrites_v1(t *testing.T) { err = proxyCd.Unmarshal(out2p1, p2) require.NoError(t, err, "unmarshalling must go ok") - require.Equal(t, p1.Value, p2.Value) + require.Equal(t, p1.GetValue(), p2.GetValue()) } type errorPingRequest struct { @@ -106,12 +106,12 @@ func TestProtoCodec_ReadYourWrites_error(t *testing.T) { pv1 := &pbV1.PingRequest{} err = proxyCd.Unmarshal(d1, pv1) require.NoError(t, err, "unmarshalling must go ok") - require.Equal(t, pv1.Value, "test-ping") + require.Equal(t, "test-ping", pv1.GetValue()) pv2 := &pb.PingRequest{} err = proxyCd.Unmarshal(d1, pv2) require.NoError(t, err, "unmarshalling must go ok") - require.Equal(t, pv2.Value, "test-ping") + require.Equal(t, "test-ping", pv2.GetValue()) pe := &errorPingRequest{} err = proxyCd.Unmarshal(d1, pe) diff --git a/pkg/grpc/proxy/handler_test.go b/pkg/grpc/proxy/handler_test.go index 6cb57c14892..47fa29b3f07 100644 --- a/pkg/grpc/proxy/handler_test.go +++ b/pkg/grpc/proxy/handler_test.go @@ -121,7 +121,7 @@ func (s *assertingService) Ping(ctx context.Context, ping *pb.PingRequest) (*pb. // Send user trailers and headers. grpc.SendHeader(ctx, metadata.Pairs(serverHeaderMdKey, "I like cats.")) grpc.SetTrailer(ctx, metadata.Pairs(serverTrailerMdKey, "I also like dogs.")) - return &pb.PingResponse{Value: ping.Value, Counter: 42}, nil + return &pb.PingResponse{Value: ping.GetValue(), Counter: 42}, nil } func (s *assertingService) PingError(ctx context.Context, ping *pb.PingRequest) (*pb.Empty, error) { @@ -132,7 +132,7 @@ func (s *assertingService) PingList(ping *pb.PingRequest, stream pb.TestService_ // Send user trailers and headers. 
 	stream.SendHeader(metadata.Pairs(serverHeaderMdKey, "I like cats."))
 	for i := 0; i < countListResponses; i++ {
-		stream.Send(&pb.PingResponse{Value: ping.Value, Counter: int32(i)})
+		stream.Send(&pb.PingResponse{Value: ping.GetValue(), Counter: int32(i)})
 	}
 	stream.SetTrailer(metadata.Pairs(serverTrailerMdKey, "I also like dogs."))
 	return nil
@@ -170,7 +170,7 @@ func (s *assertingService) PingStream(stream pb.TestService_PingStreamServer) er
 			}
 			return err
 		}
-		pong := &pb.PingResponse{Value: ping.Value, Counter: counter}
+		pong := &pb.PingResponse{Value: ping.GetValue(), Counter: counter}
 		if err := stream.Send(pong); err != nil {
 			if s.expectPingStreamError.Load() {
 				require.Error(s.t, err, "should have failed sending back a pong - test name: "+testName)
@@ -210,9 +210,9 @@ func (s *proxyTestSuite) TestPingEmptyCarriesClientMetadata() {
 	defer cancel()
 	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs(clientMdKey, "true"))
 	out, err := s.testClient.PingEmpty(ctx, &pb.Empty{})
-	require.NoError(s.T(), err, "PingEmpty should succeed without errors")
-	require.Equal(s.T(), pingDefaultValue, out.Value)
-	require.Equal(s.T(), int32(42), out.Counter)
+	s.Require().NoError(err, "PingEmpty should succeed without errors")
+	s.Require().Equal(pingDefaultValue, out.GetValue())
+	s.Require().Equal(int32(42), out.GetCounter())
 }
 
 func (s *proxyTestSuite) TestPingEmpty_StressTest() {
@@ -229,22 +229,22 @@ func (s *proxyTestSuite) TestPingCarriesServerHeadersAndTrailers() {
 	defer cancel()
 	// This is an awkward calling convention... but meh.
 	out, err := s.testClient.Ping(ctx, &pb.PingRequest{Value: "foo"}, grpc.Header(&headerMd), grpc.Trailer(&trailerMd))
-	require.NoError(s.T(), err, "Ping should succeed without errors")
-	require.Equal(s.T(), "foo", out.Value)
-	require.Equal(s.T(), int32(42), out.Counter)
-	assert.Contains(s.T(), headerMd, serverHeaderMdKey, "server response headers must contain server data")
-	assert.Len(s.T(), trailerMd, 1, "server response trailers must contain server data")
+	s.Require().NoError(err, "Ping should succeed without errors")
+	s.Require().Equal("foo", out.GetValue())
+	s.Require().Equal(int32(42), out.GetCounter())
+	s.Contains(headerMd, serverHeaderMdKey, "server response headers must contain server data")
+	s.Len(trailerMd, 1, "server response trailers must contain server data")
 }
 
 func (s *proxyTestSuite) TestPingErrorPropagatesAppError() {
 	ctx, cancel := s.ctx()
 	defer cancel()
 	_, err := s.testClient.PingError(ctx, &pb.PingRequest{Value: "foo"})
-	require.Error(s.T(), err, "PingError should never succeed")
+	s.Require().Error(err, "PingError should never succeed")
 	st, ok := status.FromError(err)
-	require.True(s.T(), ok, "must get status from error")
-	assert.Equal(s.T(), codes.FailedPrecondition, st.Code())
-	assert.Equal(s.T(), "Userspace error.", st.Message())
+	s.Require().True(ok, "must get status from error")
+	s.Equal(codes.FailedPrecondition, st.Code())
+	s.Equal("Userspace error.", st.Message())
 }
 
 func (s *proxyTestSuite) TestDirectorErrorIsPropagated() {
@@ -253,11 +253,11 @@ func (s *proxyTestSuite) TestDirectorErrorIsPropagated() {
 	// See SetupSuite where the StreamDirector has a special case.
ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs(rejectingMdKey, "true")) _, err := s.testClient.Ping(ctx, &pb.PingRequest{Value: "foo"}) - require.Error(s.T(), err, "Director should reject this RPC") + s.Require().Error(err, "Director should reject this RPC") st, ok := status.FromError(err) - require.True(s.T(), ok, "must get status from error") - assert.Equal(s.T(), codes.PermissionDenied, st.Code()) - assert.Equal(s.T(), "testing rejection", st.Message()) + s.Require().True(ok, "must get status from error") + s.Equal(codes.PermissionDenied, st.Code()) + s.Equal("testing rejection", st.Message()) } func (s *proxyTestSuite) TestPingStream_FullDuplexWorks() { @@ -270,19 +270,19 @@ func (s *proxyTestSuite) TestPingStream_FullDuplexWorks() { "dapr-test", s.T().Name(), )) stream, err := s.testClient.PingStream(ctx) - require.NoError(s.T(), err, "PingStream request should be successful") + s.Require().NoError(err, "PingStream request should be successful") for i := 0; i < countListResponses; i++ { if s.sendPing(stream, i) { break } } - require.NoError(s.T(), stream.CloseSend(), "no error on close send") + s.Require().NoError(stream.CloseSend(), "no error on close send") _, err = stream.Recv() - require.ErrorIs(s.T(), err, io.EOF, "stream should close with io.EOF, meaining OK") + s.Require().ErrorIs(err, io.EOF, "stream should close with io.EOF, meaining OK") // Check that the trailer headers are here. trailerMd := stream.Trailer() - assert.Len(s.T(), trailerMd, 1, "PingStream trailer headers user contain metadata") + s.Len(trailerMd, 1, "PingStream trailer headers user contain metadata") } func (s *proxyTestSuite) TestPingStream_StressTest() { @@ -310,7 +310,7 @@ func (s *proxyTestSuite) TestPingStream_MultipleThreads() { }() select { case <-time.After(time.Second * 10): - assert.Fail(s.T(), "Timed out waiting for proxy to return.") + s.Fail("Timed out waiting for proxy to return.") case <-ch: return } @@ -342,7 +342,7 @@ func (s *proxyTestSuite) TestRecoveryFromNetworkFailure() { func (s *proxyTestSuite) sendPing(stream pb.TestService_PingStreamClient, i int) (eof bool) { ping := &pb.PingRequest{Value: fmt.Sprintf("foo:%d", i)} err := stream.Send(ping) - require.NoError(s.T(), err, "sending to PingStream must not fail") + s.Require().NoError(err, "sending to PingStream must not fail") resp, err := stream.Recv() if errors.Is(err, io.EOF) { return true @@ -350,11 +350,11 @@ func (s *proxyTestSuite) sendPing(stream pb.TestService_PingStreamClient, i int) if i == 0 { // Check that the header arrives before all entries. 
headerMd, hErr := stream.Header() - require.NoError(s.T(), hErr, "PingStream headers should not error.") - assert.Contains(s.T(), headerMd, serverHeaderMdKey, "PingStream response headers user contain metadata") + s.Require().NoError(hErr, "PingStream headers should not error.") + s.Contains(headerMd, serverHeaderMdKey, "PingStream response headers user contain metadata") } - require.NotNil(s.T(), resp, "resp must not be nil") - assert.EqualValues(s.T(), i, resp.Counter, "ping roundtrip must succeed with the correct id") + s.Require().NotNil(resp, "resp must not be nil") + s.EqualValues(i, resp.GetCounter(), "ping roundtrip must succeed with the correct id") return false } @@ -367,11 +367,11 @@ func (s *proxyTestSuite) TestStreamConnectionInterrupted() { "dapr-test", s.T().Name(), )) stream, err := s.testClient.PingStream(ctx) - require.NoError(s.T(), err, "PingStream request should be successful") + s.Require().NoError(err, "PingStream request should be successful") // Send one message then interrupt the connection eof := s.sendPing(stream, 0) - require.False(s.T(), eof) + s.Require().False(eof) s.service.expectPingStreamError.Store(true) defer func() { @@ -382,15 +382,15 @@ func (s *proxyTestSuite) TestStreamConnectionInterrupted() { // Send another message, which should fail without resiliency ping := &pb.PingRequest{Value: fmt.Sprintf("foo:%d", 1)} err = stream.Send(ping) - require.Error(s.T(), err, "sending to PingStream must fail with a stopped server") + s.Require().Error(err, "sending to PingStream must fail with a stopped server") // Restart the server s.restartServer(s.T()) // Pings should still fail with EOF because the stream is closed err = stream.Send(ping) - require.Error(s.T(), err, "sending to PingStream must fail on a closed stream") - assert.ErrorIs(s.T(), err, io.EOF) + s.Require().Error(err, "sending to PingStream must fail on a closed stream") + s.Require().ErrorIs(err, io.EOF) } func (s *proxyTestSuite) TestPingSimulateFailure() { @@ -403,8 +403,8 @@ func (s *proxyTestSuite) TestPingSimulateFailure() { }() _, err := s.testClient.Ping(ctx, &pb.PingRequest{Value: "Ciao mamma guarda come mi diverto"}) - require.Error(s.T(), err, "Ping should return a simulated failure") - require.ErrorContains(s.T(), err, "Simulated failure") + s.Require().Error(err, "Ping should return a simulated failure") + s.Require().ErrorContains(err, "Simulated failure") } func (s *proxyTestSuite) setupResiliency() { @@ -445,19 +445,19 @@ func (s *proxyTestSuite) TestResiliencyUnary() { require.NoError(t, err, "Ping should succeed after retrying") require.NotNil(t, res, "Response should not be nil") require.Equal(t, int32(3), s.service.pingCallCount.Load()) - require.Equal(t, message, res.Value) + require.Equal(t, message, res.GetValue()) assertRequestSentMetrics(t, "unary", 3, nil) rows, err := view.RetrieveData(serviceInvocationResponseRecvName) - assert.NoError(t, err) - assert.Equal(t, 2, len(rows)) + require.NoError(t, err) + assert.Len(t, rows, 2) // 2 Ping failures - assert.Equal(t, diag.GetValueForObservationWithTagSet( - rows, map[tag.Tag]bool{diag.NewTag("status", strconv.Itoa(int(codes.Internal))): true}), int64(2)) + assert.Equal(t, int64(2), diag.GetValueForObservationWithTagSet( + rows, map[tag.Tag]bool{diag.NewTag("status", strconv.Itoa(int(codes.Internal))): true})) // 1 success - assert.Equal(t, diag.GetValueForObservationWithTagSet( - rows, map[tag.Tag]bool{diag.NewTag("status", strconv.Itoa(int(codes.OK))): true}), int64(1)) + assert.Equal(t, int64(1), 
diag.GetValueForObservationWithTagSet( + rows, map[tag.Tag]bool{diag.NewTag("status", strconv.Itoa(int(codes.OK))): true})) }) s.T().Run("timeouts", func(t *testing.T) { @@ -479,8 +479,8 @@ func (s *proxyTestSuite) TestResiliencyUnary() { require.Equal(t, int32(4), s.service.pingCallCount.Load()) grpcStatus, ok := status.FromError(err) - require.True(s.T(), ok, "Error should have a gRPC status code") - require.Equal(s.T(), codes.DeadlineExceeded, grpcStatus.Code()) + s.Require().True(ok, "Error should have a gRPC status code") + s.Require().Equal(codes.DeadlineExceeded, grpcStatus.Code()) // Sleep for 500ms before returning to allow all timed-out goroutines to catch up with the timeouts time.Sleep(500 * time.Millisecond) @@ -515,7 +515,7 @@ func (s *proxyTestSuite) TestResiliencyUnary() { res, err := s.testClient.Ping(ctx, &pb.PingRequest{Value: pingMsg}) require.NoErrorf(t, err, "Ping should succeed for operation %d:%d", i, j) require.NotNilf(t, res, "Response should not be nil for operation %d:%d", i, j) - require.Equalf(t, pingMsg, res.Value, "Value should match for operation %d:%d", i, j) + require.Equalf(t, pingMsg, res.GetValue(), "Value should match for operation %d:%d", i, j) } wg.Done() }(i) @@ -528,7 +528,7 @@ func (s *proxyTestSuite) TestResiliencyUnary() { }() select { case <-time.After(time.Second * 10): - assert.Fail(s.T(), "Timed out waiting for proxy to return.") + s.Fail("Timed out waiting for proxy to return.") case <-ch: } @@ -542,8 +542,8 @@ func (s *proxyTestSuite) TestResiliencyUnary() { func assertResponseReceiveMetricsSameCode(t *testing.T, requestType string, code codes.Code, expected int64) []*view.Row { t.Helper() rows, err := view.RetrieveData(serviceInvocationResponseRecvName) - assert.NoError(t, err) - assert.Equal(t, 1, len(rows)) + require.NoError(t, err) + assert.Len(t, rows, 1) count := diag.GetValueForObservationWithTagSet( rows, map[tag.Tag]bool{ diag.NewTag("status", strconv.Itoa(int(code))): true, @@ -556,8 +556,8 @@ func assertResponseReceiveMetricsSameCode(t *testing.T, requestType string, code func assertRequestSentMetrics(t *testing.T, requestType string, requestsSentExpected int64, assertEqualFn func(t assert.TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool) []*view.Row { t.Helper() rows, err := view.RetrieveData(serviceInvocationRequestSentName) - assert.NoError(t, err) - assert.Equal(t, 1, len(rows)) + require.NoError(t, err) + assert.Len(t, rows, 1) requestsSent := diag.GetValueForObservationWithTagSet( rows, map[tag.Tag]bool{diag.NewTag("type", requestType): true}) @@ -646,14 +646,14 @@ func (s *proxyTestSuite) TestResiliencyStreaming() { // At least 1200ms (2 * 600ms) should have passed require.GreaterOrEqual(t, time.Since(start), 1200*time.Millisecond) - require.NoError(s.T(), stream.CloseSend(), "no error on close send") + s.Require().NoError(stream.CloseSend(), "no error on close send") _, err = stream.Recv() - require.ErrorIs(s.T(), err, io.EOF, "stream should close with io.EOF, meaining OK") + s.Require().ErrorIs(err, io.EOF, "stream should close with io.EOF, meaining OK") assertRequestSentMetrics(t, "streaming", 1, nil) rows, err := view.RetrieveData(serviceInvocationResponseRecvName) - assert.NoError(t, err) - assert.Equal(t, 0, len(rows)) // no error so no response metric + require.NoError(t, err) + assert.Empty(t, rows) // no error so no response metric }) s.T().Run("simulate connection failures with retry", func(t *testing.T) { @@ -686,9 +686,9 @@ func (s *proxyTestSuite) TestResiliencyStreaming() { 
require.NotNil(t, res) } - require.NoError(s.T(), stream.CloseSend(), "no error on close send") + s.Require().NoError(stream.CloseSend(), "no error on close send") _, err = stream.Recv() - require.ErrorIs(s.T(), err, io.EOF, "stream should close with io.EOF, meaining OK") + s.Require().ErrorIs(err, io.EOF, "stream should close with io.EOF, meaining OK") assertRequestSentMetrics(t, "streaming", 2, nil) assertResponseReceiveMetricsSameCode(t, "streaming", codes.Unavailable, 1) @@ -698,7 +698,7 @@ func (s *proxyTestSuite) TestResiliencyStreaming() { func setupMetrics(s *proxyTestSuite) { s.T().Helper() metricsCleanup() - assert.NoError(s.T(), diag.DefaultMonitoring.Init(testAppID)) + s.Require().NoError(diag.DefaultMonitoring.Init(testAppID)) } func (s *proxyTestSuite) initServer() { @@ -718,7 +718,7 @@ func (s *proxyTestSuite) restartServer(t *testing.T) { srvPort := s.serverListener.Addr().(*net.TCPAddr).Port s.serverListener, err = net.Listen("tcp", "127.0.0.1:"+strconv.Itoa(srvPort)) - require.NoError(s.T(), err, "must not error while starting serverListener") + s.Require().NoError(err, "must not error while starting serverListener") s.T().Logf("re-starting grpc.Server at: %v", s.serverListener.Addr().String()) s.initServer() @@ -766,13 +766,13 @@ func (s *proxyTestSuite) SetupSuite() { pc := encoding.GetCodec((&codec.Proxy{}).Name()) dc := encoding.GetCodec("proto") - require.NotNil(s.T(), pc, "proxy codec must be registered") - require.NotNil(s.T(), dc, "default codec must be registered") + s.Require().NotNil(pc, "proxy codec must be registered") + s.Require().NotNil(dc, "default codec must be registered") s.proxyListener, err = net.Listen("tcp", "127.0.0.1:0") - require.NoError(s.T(), err, "must be able to allocate a port for proxyListener") + s.Require().NoError(err, "must be able to allocate a port for proxyListener") s.serverListener, err = net.Listen("tcp", "127.0.0.1:0") - require.NoError(s.T(), err, "must be able to allocate a port for serverListener") + s.Require().NoError(err, "must be able to allocate a port for serverListener") grpclog.SetLoggerV2(testingLog{s.T()}) @@ -837,7 +837,7 @@ func (s *proxyTestSuite) SetupSuite() { grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.CallContentSubtype((&codec.Proxy{}).Name())), ) - require.NoError(s.T(), err, "must not error on deferred client Dial") + s.Require().NoError(err, "must not error on deferred client Dial") s.testClient = pb.NewTestServiceClient(clientConn) } diff --git a/pkg/grpc/server_test.go b/pkg/grpc/server_test.go index 1c26938371f..3199e56362d 100644 --- a/pkg/grpc/server_test.go +++ b/pkg/grpc/server_test.go @@ -38,7 +38,7 @@ func TestGetMiddlewareOptions(t *testing.T) { serverOption := fakeServer.getMiddlewareOptions() - assert.Equal(t, 3, len(serverOption)) + assert.Len(t, serverOption, 3) }) t.Run("should not disable middleware even when SamplingRate is 0", func(t *testing.T) { @@ -52,7 +52,7 @@ func TestGetMiddlewareOptions(t *testing.T) { serverOption := fakeServer.getMiddlewareOptions() - assert.Equal(t, 3, len(serverOption)) + assert.Len(t, serverOption, 3) }) t.Run("should have api access rules middleware", func(t *testing.T) { @@ -75,7 +75,7 @@ func TestGetMiddlewareOptions(t *testing.T) { serverOption := fakeServer.getMiddlewareOptions() - assert.Equal(t, 3, len(serverOption)) + assert.Len(t, serverOption, 3) }) } @@ -98,7 +98,7 @@ func TestClose(t *testing.T) { server := NewAPIServer(a, serverConfig, config.TracingSpec{}, config.MetricSpec{}, config.APISpec{}, 
nil, nil) require.NoError(t, server.StartNonBlocking()) dapr_testing.WaitForListeningAddress(t, 5*time.Second, fmt.Sprintf("127.0.0.1:%d", port)) - assert.NoError(t, server.Close()) + require.NoError(t, server.Close()) }) t.Run("test close with api logging disabled", func(t *testing.T) { @@ -119,7 +119,7 @@ func TestClose(t *testing.T) { server := NewAPIServer(a, serverConfig, config.TracingSpec{}, config.MetricSpec{}, config.APISpec{}, nil, nil) require.NoError(t, server.StartNonBlocking()) dapr_testing.WaitForListeningAddress(t, 5*time.Second, fmt.Sprintf("127.0.0.1:%d", port)) - assert.NoError(t, server.Close()) + require.NoError(t, server.Close()) }) } @@ -176,7 +176,7 @@ func TestGrpcAPILoggingMiddlewares(t *testing.T) { timeStr, ok := logData["time"].(string) assert.True(t, ok) tt, err := time.Parse(time.RFC3339Nano, timeStr) - assert.NoError(t, err) + require.NoError(t, err) assert.InDelta(t, time.Now().Unix(), tt.Unix(), 120) // In our test the duration better be no more than 10ms! diff --git a/pkg/grpc/universalapi/api_lock.go b/pkg/grpc/universalapi/api_lock.go index 0d6cfe4c844..a8e3371b3d3 100644 --- a/pkg/grpc/universalapi/api_lock.go +++ b/pkg/grpc/universalapi/api_lock.go @@ -25,8 +25,8 @@ import ( func (a *UniversalAPI) TryLockAlpha1(ctx context.Context, req *runtimev1pb.TryLockRequest) (*runtimev1pb.TryLockResponse, error) { // 1. validate and find lock component - if req.ExpiryInSeconds <= 0 { - err := messages.ErrExpiryInSecondsNotPositive.WithFormat(req.StoreName) + if req.GetExpiryInSeconds() <= 0 { + err := messages.ErrExpiryInSecondsNotPositive.WithFormat(req.GetStoreName()) a.Logger.Debug(err) return &runtimev1pb.TryLockResponse{}, err } @@ -37,12 +37,12 @@ func (a *UniversalAPI) TryLockAlpha1(ctx context.Context, req *runtimev1pb.TryLo // 2. convert request compReq := &lock.TryLockRequest{ - ResourceID: req.ResourceId, - LockOwner: req.LockOwner, - ExpiryInSeconds: req.ExpiryInSeconds, + ResourceID: req.GetResourceId(), + LockOwner: req.GetLockOwner(), + ExpiryInSeconds: req.GetExpiryInSeconds(), } // modify key - compReq.ResourceID, err = lockLoader.GetModifiedLockKey(compReq.ResourceID, req.StoreName, a.AppID) + compReq.ResourceID, err = lockLoader.GetModifiedLockKey(compReq.ResourceID, req.GetStoreName(), a.AppID) if err != nil { err = messages.ErrTryLockFailed.WithFormat(err) a.Logger.Debug(err) @@ -51,7 +51,7 @@ func (a *UniversalAPI) TryLockAlpha1(ctx context.Context, req *runtimev1pb.TryLo // 3. delegate to the component policyRunner := resiliency.NewRunner[*lock.TryLockResponse](ctx, - a.Resiliency.ComponentOutboundPolicy(req.StoreName, resiliency.Lock), + a.Resiliency.ComponentOutboundPolicy(req.GetStoreName(), resiliency.Lock), ) resp, err := policyRunner(func(ctx context.Context) (*lock.TryLockResponse, error) { return store.TryLock(ctx, compReq) @@ -82,11 +82,11 @@ func (a *UniversalAPI) UnlockAlpha1(ctx context.Context, req *runtimev1pb.Unlock // 2. convert request compReq := &lock.UnlockRequest{ - ResourceID: req.ResourceId, - LockOwner: req.LockOwner, + ResourceID: req.GetResourceId(), + LockOwner: req.GetLockOwner(), } // modify key - compReq.ResourceID, err = lockLoader.GetModifiedLockKey(compReq.ResourceID, req.StoreName, a.AppID) + compReq.ResourceID, err = lockLoader.GetModifiedLockKey(compReq.ResourceID, req.GetStoreName(), a.AppID) if err != nil { err = messages.ErrUnlockFailed.WithFormat(err) a.Logger.Debug(err) @@ -95,7 +95,7 @@ func (a *UniversalAPI) UnlockAlpha1(ctx context.Context, req *runtimev1pb.Unlock // 3. 
delegate to the component policyRunner := resiliency.NewRunner[*lock.UnlockResponse](ctx, - a.Resiliency.ComponentOutboundPolicy(req.StoreName, resiliency.Lock), + a.Resiliency.ComponentOutboundPolicy(req.GetStoreName(), resiliency.Lock), ) resp, err := policyRunner(func(ctx context.Context) (*lock.UnlockResponse, error) { return store.Unlock(ctx, compReq) diff --git a/pkg/grpc/universalapi/api_metadata.go b/pkg/grpc/universalapi/api_metadata.go index a59e8a84417..fc3fda8065d 100644 --- a/pkg/grpc/universalapi/api_metadata.go +++ b/pkg/grpc/universalapi/api_metadata.go @@ -67,7 +67,7 @@ func (a *UniversalAPI) GetMetadata(ctx context.Context, in *runtimev1pb.GetMetad } // Health check path is not applicable for gRPC. - if protocol.Protocol(appConnectionProperties.Protocol).IsHTTP() { + if protocol.Protocol(appConnectionProperties.GetProtocol()).IsHTTP() { appConnectionProperties.Health.HealthCheckPath = a.AppConnectionConfig.HealthCheckHTTPPath } } @@ -124,7 +124,7 @@ func (a *UniversalAPI) GetMetadata(ctx context.Context, in *runtimev1pb.GetMetad // SetMetadata Sets value in extended metadata of the sidecar. func (a *UniversalAPI) SetMetadata(ctx context.Context, in *runtimev1pb.SetMetadataRequest) (*emptypb.Empty, error) { // Nop if the key is empty - if in.Key == "" { + if in.GetKey() == "" { return &emptypb.Empty{}, nil } @@ -132,7 +132,7 @@ func (a *UniversalAPI) SetMetadata(ctx context.Context, in *runtimev1pb.SetMetad if a.ExtendedMetadata == nil { a.ExtendedMetadata = make(map[string]string) } - a.ExtendedMetadata[in.Key] = in.Value + a.ExtendedMetadata[in.GetKey()] = in.GetValue() a.extendedMetadataLock.Unlock() return &emptypb.Empty{}, nil diff --git a/pkg/grpc/universalapi/api_metadata_test.go b/pkg/grpc/universalapi/api_metadata_test.go index dd0981aae3a..8672ece5e5f 100644 --- a/pkg/grpc/universalapi/api_metadata_test.go +++ b/pkg/grpc/universalapi/api_metadata_test.go @@ -110,7 +110,7 @@ func TestGetMetadata(t *testing.T) { require.NoError(t, err, "Expected no error") bytes, err := json.Marshal(response) - assert.NoError(t, err) + require.NoError(t, err) healthCheckJSON := "}," if tc.expectHealthCheckEnabled { diff --git a/pkg/grpc/universalapi/api_secrets.go b/pkg/grpc/universalapi/api_secrets.go index 2aa357c2dd2..1a938982513 100644 --- a/pkg/grpc/universalapi/api_secrets.go +++ b/pkg/grpc/universalapi/api_secrets.go @@ -27,25 +27,25 @@ import ( func (a *UniversalAPI) GetSecret(ctx context.Context, in *runtimev1pb.GetSecretRequest) (*runtimev1pb.GetSecretResponse, error) { var response *runtimev1pb.GetSecretResponse - component, err := a.secretsValidateRequest(in.StoreName) + component, err := a.secretsValidateRequest(in.GetStoreName()) if err != nil { return response, err } - if !a.isSecretAllowed(in.StoreName, in.Key) { - err = messages.ErrSecretPermissionDenied.WithFormat(in.Key, in.StoreName) + if !a.isSecretAllowed(in.GetStoreName(), in.GetKey()) { + err = messages.ErrSecretPermissionDenied.WithFormat(in.GetKey(), in.GetStoreName()) a.Logger.Debug(err) return response, err } req := secretstores.GetSecretRequest{ - Name: in.Key, - Metadata: in.Metadata, + Name: in.GetKey(), + Metadata: in.GetMetadata(), } start := time.Now() policyRunner := resiliency.NewRunner[*secretstores.GetSecretResponse](ctx, - a.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Secretstore), + a.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Secretstore), ) getResponse, err := policyRunner(func(ctx context.Context) (*secretstores.GetSecretResponse, error) { rResp, rErr 
:= component.GetSecret(ctx, req) @@ -53,10 +53,10 @@ func (a *UniversalAPI) GetSecret(ctx context.Context, in *runtimev1pb.GetSecretR }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.SecretInvoked(ctx, in.StoreName, diag.Get, err == nil, elapsed) + diag.DefaultComponentMonitoring.SecretInvoked(ctx, in.GetStoreName(), diag.Get, err == nil, elapsed) if err != nil { - err = messages.ErrSecretGet.WithFormat(req.Name, in.StoreName, err.Error()) + err = messages.ErrSecretGet.WithFormat(req.Name, in.GetStoreName(), err.Error()) a.Logger.Debug(err) return response, err } @@ -72,18 +72,18 @@ func (a *UniversalAPI) GetSecret(ctx context.Context, in *runtimev1pb.GetSecretR func (a *UniversalAPI) GetBulkSecret(ctx context.Context, in *runtimev1pb.GetBulkSecretRequest) (*runtimev1pb.GetBulkSecretResponse, error) { var response *runtimev1pb.GetBulkSecretResponse - component, err := a.secretsValidateRequest(in.StoreName) + component, err := a.secretsValidateRequest(in.GetStoreName()) if err != nil { return response, err } req := secretstores.BulkGetSecretRequest{ - Metadata: in.Metadata, + Metadata: in.GetMetadata(), } start := time.Now() policyRunner := resiliency.NewRunner[*secretstores.BulkGetSecretResponse](ctx, - a.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Secretstore), + a.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Secretstore), ) getResponse, err := policyRunner(func(ctx context.Context) (*secretstores.BulkGetSecretResponse, error) { rResp, rErr := component.BulkGetSecret(ctx, req) @@ -91,10 +91,10 @@ func (a *UniversalAPI) GetBulkSecret(ctx context.Context, in *runtimev1pb.GetBul }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.SecretInvoked(ctx, in.StoreName, diag.BulkGet, err == nil, elapsed) + diag.DefaultComponentMonitoring.SecretInvoked(ctx, in.GetStoreName(), diag.BulkGet, err == nil, elapsed) if err != nil { - err = messages.ErrBulkSecretGet.WithFormat(in.StoreName, err.Error()) + err = messages.ErrBulkSecretGet.WithFormat(in.GetStoreName(), err.Error()) a.Logger.Debug(err) return response, err } @@ -104,10 +104,10 @@ func (a *UniversalAPI) GetBulkSecret(ctx context.Context, in *runtimev1pb.GetBul } filteredSecrets := map[string]map[string]string{} for key, v := range getResponse.Data { - if a.isSecretAllowed(in.StoreName, key) { + if a.isSecretAllowed(in.GetStoreName(), key) { filteredSecrets[key] = v } else { - a.Logger.Debugf(messages.ErrSecretPermissionDenied.WithFormat(key, in.StoreName).String()) + a.Logger.Debugf(messages.ErrSecretPermissionDenied.WithFormat(key, in.GetStoreName()).String()) } } diff --git a/pkg/grpc/universalapi/api_secrets_test.go b/pkg/grpc/universalapi/api_secrets_test.go index bb02d62377b..362313c1bee 100644 --- a/pkg/grpc/universalapi/api_secrets_test.go +++ b/pkg/grpc/universalapi/api_secrets_test.go @@ -43,13 +43,13 @@ func TestSecretStoreNotConfigured(t *testing.T) { t.Run("GetSecret", func(t *testing.T) { _, err := fakeAPI.GetSecret(context.Background(), &runtimev1pb.GetSecretRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrSecretStoreNotConfigured) + require.ErrorIs(t, err, messages.ErrSecretStoreNotConfigured) }) t.Run("GetBulkSecret", func(t *testing.T) { _, err := fakeAPI.GetBulkSecret(context.Background(), &runtimev1pb.GetBulkSecretRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrSecretStoreNotConfigured) + require.ErrorIs(t, err, messages.ErrSecretStoreNotConfigured) }) } @@ -178,10 +178,10 @@ func TestGetSecret(t 
*testing.T) { resp, err := fakeAPI.GetSecret(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err, "Expected no error") - assert.Equal(t, resp.Data[tt.key], tt.expectedResponse, "Expected responses to be same") + require.NoError(t, err, "Expected no error") + assert.Equal(t, tt.expectedResponse, resp.GetData()[tt.key], "Expected responses to be same") } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -242,10 +242,10 @@ func TestGetBulkSecret(t *testing.T) { resp, err := fakeAPI.GetBulkSecret(context.Background(), req) if !tt.errorExcepted { - assert.NoError(t, err, "Expected no error") - assert.Equal(t, resp.Data[tt.key].Secrets[tt.key], tt.expectedResponse, "Expected responses to be same") + require.NoError(t, err, "Expected no error") + assert.Equal(t, tt.expectedResponse, resp.GetData()[tt.key].GetSecrets()[tt.key], "Expected responses to be same") } else { - assert.Error(t, err, "Expected error") + require.Error(t, err, "Expected error") assert.Equal(t, tt.expectedError, status.Code(err)) } }) @@ -278,7 +278,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { Key: "key", }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("key")) }) @@ -291,7 +291,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("timeout")) assert.Less(t, end.Sub(start), time.Second*30) }) @@ -302,7 +302,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { Metadata: map[string]string{"key": "bulk"}, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("bulk")) }) @@ -314,7 +314,7 @@ func TestSecretAPIWithResiliency(t *testing.T) { }) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingStore.Failure.CallCount("bulkTimeout")) assert.Less(t, end.Sub(start), time.Second*30) }) diff --git a/pkg/grpc/universalapi/api_shutdown_test.go b/pkg/grpc/universalapi/api_shutdown_test.go index 17b03a43583..d7f6407eeb2 100644 --- a/pkg/grpc/universalapi/api_shutdown_test.go +++ b/pkg/grpc/universalapi/api_shutdown_test.go @@ -18,7 +18,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" ) @@ -37,7 +37,7 @@ func TestShutdownEndpoint(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) _, err := fakeAPI.Shutdown(ctx, &runtimev1pb.ShutdownRequest{}) cancel() - assert.NoError(t, err, "Expected no error") + require.NoError(t, err, "Expected no error") select { case <-time.After(time.Second): t.Fatal("Did not shut down within 1 second") diff --git a/pkg/grpc/universalapi/api_state_query.go b/pkg/grpc/universalapi/api_state_query.go index f3d786f6a48..97d3b7112b0 100644 --- a/pkg/grpc/universalapi/api_state_query.go +++ b/pkg/grpc/universalapi/api_state_query.go @@ -45,7 +45,7 @@ func (a *UniversalAPI) GetStateStore(name string) (state.Store, error) { } func (a *UniversalAPI) QueryStateAlpha1(ctx context.Context, in *runtimev1pb.QueryStateRequest) (*runtimev1pb.QueryStateResponse, error) { - store, err := a.GetStateStore(in.StoreName) + store, err := a.GetStateStore(in.GetStoreName()) if err != nil { // Error has already been logged return nil, err @@ -58,15 +58,15 @@ func (a *UniversalAPI) QueryStateAlpha1(ctx context.Context, 
in *runtimev1pb.Que return nil, err } - if encryption.EncryptedStateStore(in.StoreName) { - err = messages.ErrStateQueryFailed.WithFormat(in.StoreName, "cannot query encrypted store") + if encryption.EncryptedStateStore(in.GetStoreName()) { + err = messages.ErrStateQueryFailed.WithFormat(in.GetStoreName(), "cannot query encrypted store") a.Logger.Debug(err) return nil, err } var req state.QueryRequest - if err = json.Unmarshal([]byte(in.Query), &req.Query); err != nil { - err = messages.ErrStateQueryFailed.WithFormat(in.StoreName, "failed to parse JSON query body: "+err.Error()) + if err = json.Unmarshal([]byte(in.GetQuery()), &req.Query); err != nil { + err = messages.ErrStateQueryFailed.WithFormat(in.GetStoreName(), "failed to parse JSON query body: "+err.Error()) a.Logger.Debug(err) return nil, err } @@ -75,17 +75,17 @@ func (a *UniversalAPI) QueryStateAlpha1(ctx context.Context, in *runtimev1pb.Que start := time.Now() policyRunner := resiliency.NewRunner[*state.QueryResponse](ctx, - a.Resiliency.ComponentOutboundPolicy(in.StoreName, resiliency.Statestore), + a.Resiliency.ComponentOutboundPolicy(in.GetStoreName(), resiliency.Statestore), ) resp, err := policyRunner(func(ctx context.Context) (*state.QueryResponse, error) { return querier.Query(ctx, &req) }) elapsed := diag.ElapsedSince(start) - diag.DefaultComponentMonitoring.StateInvoked(ctx, in.StoreName, diag.StateQuery, err == nil, elapsed) + diag.DefaultComponentMonitoring.StateInvoked(ctx, in.GetStoreName(), diag.StateQuery, err == nil, elapsed) if err != nil { - err = messages.ErrStateQueryFailed.WithFormat(in.StoreName, err.Error()) + err = messages.ErrStateQueryFailed.WithFormat(in.GetStoreName(), err.Error()) a.Logger.Debug(err) return nil, err } diff --git a/pkg/grpc/universalapi/api_subtlecrypto_subtlecrypto_test.go b/pkg/grpc/universalapi/api_subtlecrypto_subtlecrypto_test.go index a6fbda88328..cd5057cefc1 100644 --- a/pkg/grpc/universalapi/api_subtlecrypto_subtlecrypto_test.go +++ b/pkg/grpc/universalapi/api_subtlecrypto_subtlecrypto_test.go @@ -109,7 +109,7 @@ func TestSubtleGetKeyAlpha1(t *testing.T) { _, err := fakeAPI.SubtleGetKeyAlpha1(context.Background(), &runtimev1pb.SubtleGetKeyRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) + require.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) }) t.Run("provider not found", func(t *testing.T) { @@ -117,7 +117,7 @@ func TestSubtleGetKeyAlpha1(t *testing.T) { ComponentName: "notfound", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) + require.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) }) t.Run("invalid format", func(t *testing.T) { @@ -126,8 +126,8 @@ func TestSubtleGetKeyAlpha1(t *testing.T) { Format: runtimev1pb.SubtleGetKeyRequest_KeyFormat(-9000), }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrBadRequest) - assert.ErrorContains(t, err, "invalid key format") + require.ErrorIs(t, err, messages.ErrBadRequest) + require.ErrorContains(t, err, "invalid key format") }) t.Run("failed to get key", func(t *testing.T) { @@ -136,8 +136,8 @@ func TestSubtleGetKeyAlpha1(t *testing.T) { Name: "error-key", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoGetKey) - assert.ErrorContains(t, err, "error occurs with error-key") + require.ErrorIs(t, err, messages.ErrCryptoGetKey) + require.ErrorContains(t, err, "error occurs with error-key") }) } @@ -172,7 +172,7 @@ func TestSubtleEncryptAlpha1(t *testing.T) { _, err := 
fakeAPI.SubtleEncryptAlpha1(context.Background(), &runtimev1pb.SubtleEncryptRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) + require.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) }) t.Run("provider not found", func(t *testing.T) { @@ -180,7 +180,7 @@ func TestSubtleEncryptAlpha1(t *testing.T) { ComponentName: "notfound", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) + require.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) }) t.Run("failed to encrypt", func(t *testing.T) { @@ -189,9 +189,9 @@ func TestSubtleEncryptAlpha1(t *testing.T) { KeyName: "error", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoOperation) + require.ErrorIs(t, err, messages.ErrCryptoOperation) // The actual error is not returned to the user for security reasons - assert.ErrorContains(t, err, "failed to encrypt") + require.ErrorContains(t, err, "failed to encrypt") }) } @@ -225,7 +225,7 @@ func TestSubtleDecryptAlpha1(t *testing.T) { _, err := fakeAPI.SubtleDecryptAlpha1(context.Background(), &runtimev1pb.SubtleDecryptRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) + require.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) }) t.Run("provider not found", func(t *testing.T) { @@ -233,7 +233,7 @@ func TestSubtleDecryptAlpha1(t *testing.T) { ComponentName: "notfound", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) + require.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) }) t.Run("failed to decrypt", func(t *testing.T) { @@ -242,9 +242,9 @@ func TestSubtleDecryptAlpha1(t *testing.T) { KeyName: "error", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoOperation) + require.ErrorIs(t, err, messages.ErrCryptoOperation) // The actual error is not returned to the user for security reasons - assert.ErrorContains(t, err, "failed to decrypt") + require.ErrorContains(t, err, "failed to decrypt") }) } @@ -278,7 +278,7 @@ func TestSubtleWrapKeyAlpha1(t *testing.T) { _, err := fakeAPI.SubtleWrapKeyAlpha1(context.Background(), &runtimev1pb.SubtleWrapKeyRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) + require.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) }) t.Run("provider not found", func(t *testing.T) { @@ -286,7 +286,7 @@ func TestSubtleWrapKeyAlpha1(t *testing.T) { ComponentName: "notfound", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) + require.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) }) t.Run("key is empty", func(t *testing.T) { @@ -295,8 +295,8 @@ func TestSubtleWrapKeyAlpha1(t *testing.T) { KeyName: "error", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoOperation) - assert.ErrorContains(t, err, "key is empty") + require.ErrorIs(t, err, messages.ErrCryptoOperation) + require.ErrorContains(t, err, "key is empty") }) t.Run("failed to wrap key", func(t *testing.T) { @@ -306,9 +306,9 @@ func TestSubtleWrapKeyAlpha1(t *testing.T) { PlaintextKey: oneHundredTwentyEightBits, }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoOperation) + require.ErrorIs(t, err, messages.ErrCryptoOperation) // The actual error is not returned to the user for security reasons - assert.ErrorContains(t, err, "failed to wrap key") + require.ErrorContains(t, err, "failed to wrap key") }) } @@ -342,7 +342,7 @@ func TestSubtleUnwrapKeyAlpha1(t *testing.T) { _, 
err := fakeAPI.SubtleUnwrapKeyAlpha1(context.Background(), &runtimev1pb.SubtleUnwrapKeyRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) + require.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) }) t.Run("provider not found", func(t *testing.T) { @@ -350,7 +350,7 @@ func TestSubtleUnwrapKeyAlpha1(t *testing.T) { ComponentName: "notfound", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) + require.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) }) t.Run("failed to unwrap key", func(t *testing.T) { @@ -360,9 +360,9 @@ func TestSubtleUnwrapKeyAlpha1(t *testing.T) { WrappedKey: oneHundredTwentyEightBits, }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoOperation) + require.ErrorIs(t, err, messages.ErrCryptoOperation) // The actual error is not returned to the user for security reasons - assert.ErrorContains(t, err, "failed to unwrap key") + require.ErrorContains(t, err, "failed to unwrap key") }) } @@ -396,7 +396,7 @@ func TestSubtleSignAlpha1(t *testing.T) { _, err := fakeAPI.SubtleSignAlpha1(context.Background(), &runtimev1pb.SubtleSignRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) + require.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) }) t.Run("provider not found", func(t *testing.T) { @@ -404,7 +404,7 @@ func TestSubtleSignAlpha1(t *testing.T) { ComponentName: "notfound", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) + require.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) }) t.Run("failed to sign", func(t *testing.T) { @@ -414,9 +414,9 @@ func TestSubtleSignAlpha1(t *testing.T) { Digest: oneHundredTwentyEightBits, }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoOperation) + require.ErrorIs(t, err, messages.ErrCryptoOperation) // The actual error is not returned to the user for security reasons - assert.ErrorContains(t, err, "failed to sign") + require.ErrorContains(t, err, "failed to sign") }) } @@ -462,7 +462,7 @@ func TestSubtleVerifyAlpha1(t *testing.T) { _, err := fakeAPI.SubtleVerifyAlpha1(context.Background(), &runtimev1pb.SubtleVerifyRequest{}) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) + require.ErrorIs(t, err, messages.ErrCryptoProvidersNotConfigured) }) t.Run("provider not found", func(t *testing.T) { @@ -470,7 +470,7 @@ func TestSubtleVerifyAlpha1(t *testing.T) { ComponentName: "notfound", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) + require.ErrorIs(t, err, messages.ErrCryptoProviderNotFound) }) t.Run("failed to verify", func(t *testing.T) { @@ -479,9 +479,9 @@ func TestSubtleVerifyAlpha1(t *testing.T) { KeyName: "error", }) require.Error(t, err) - assert.ErrorIs(t, err, messages.ErrCryptoOperation) + require.ErrorIs(t, err, messages.ErrCryptoOperation) // The actual error is not returned to the user for security reasons - assert.ErrorContains(t, err, "failed to verify") + require.ErrorContains(t, err, "failed to verify") }) } diff --git a/pkg/grpc/universalapi/api_workflow.go b/pkg/grpc/universalapi/api_workflow.go index 00fb76a9734..49211c423f5 100644 --- a/pkg/grpc/universalapi/api_workflow.go +++ b/pkg/grpc/universalapi/api_workflow.go @@ -29,7 +29,7 @@ import ( // GetWorkflowBeta1 is the API handler for getting workflow details func (a *UniversalAPI) GetWorkflowBeta1(ctx context.Context, in *runtimev1pb.GetWorkflowRequest) 
(*runtimev1pb.GetWorkflowResponse, error) { - if err := a.validateInstanceID(in.InstanceId, false /* isCreate */); err != nil { + if err := a.validateInstanceID(in.GetInstanceId(), false /* isCreate */); err != nil { a.Logger.Debug(err) return &runtimev1pb.GetWorkflowResponse{}, err } @@ -37,21 +37,21 @@ func (a *UniversalAPI) GetWorkflowBeta1(ctx context.Context, in *runtimev1pb.Get // Workflow requires actors to be ready a.WaitForActorsReady(ctx) - workflowComponent, err := a.getWorkflowComponent(in.WorkflowComponent) + workflowComponent, err := a.getWorkflowComponent(in.GetWorkflowComponent()) if err != nil { a.Logger.Debug(err) return &runtimev1pb.GetWorkflowResponse{}, err } req := workflows.GetRequest{ - InstanceID: in.InstanceId, + InstanceID: in.GetInstanceId(), } response, err := workflowComponent.Get(ctx, &req) if err != nil { if errors.Is(err, api.ErrInstanceNotFound) { - err = messages.ErrWorkflowInstanceNotFound.WithFormat(in.InstanceId, err) + err = messages.ErrWorkflowInstanceNotFound.WithFormat(in.GetInstanceId(), err) } else { - err = messages.ErrWorkflowGetResponse.WithFormat(in.InstanceId, err) + err = messages.ErrWorkflowGetResponse.WithFormat(in.GetInstanceId(), err) } a.Logger.Debug(err) return &runtimev1pb.GetWorkflowResponse{}, err @@ -70,12 +70,12 @@ func (a *UniversalAPI) GetWorkflowBeta1(ctx context.Context, in *runtimev1pb.Get // StartWorkflowBeta1 is the API handler for starting a workflow func (a *UniversalAPI) StartWorkflowBeta1(ctx context.Context, in *runtimev1pb.StartWorkflowRequest) (*runtimev1pb.StartWorkflowResponse, error) { - if err := a.validateInstanceID(in.InstanceId, true /* isCreate */); err != nil { + if err := a.validateInstanceID(in.GetInstanceId(), true /* isCreate */); err != nil { a.Logger.Debug(err) return &runtimev1pb.StartWorkflowResponse{}, err } - if in.WorkflowName == "" { + if in.GetWorkflowName() == "" { err := messages.ErrWorkflowNameMissing a.Logger.Debug(err) return &runtimev1pb.StartWorkflowResponse{}, err @@ -84,22 +84,22 @@ func (a *UniversalAPI) StartWorkflowBeta1(ctx context.Context, in *runtimev1pb.S // Workflow requires actors to be ready a.WaitForActorsReady(ctx) - workflowComponent, err := a.getWorkflowComponent(in.WorkflowComponent) + workflowComponent, err := a.getWorkflowComponent(in.GetWorkflowComponent()) if err != nil { a.Logger.Debug(err) return &runtimev1pb.StartWorkflowResponse{}, err } req := workflows.StartRequest{ - InstanceID: in.InstanceId, - Options: in.Options, - WorkflowName: in.WorkflowName, - WorkflowInput: in.Input, + InstanceID: in.GetInstanceId(), + Options: in.GetOptions(), + WorkflowName: in.GetWorkflowName(), + WorkflowInput: in.GetInput(), } resp, err := workflowComponent.Start(ctx, &req) if err != nil { - err := messages.ErrStartWorkflow.WithFormat(in.WorkflowName, err) + err := messages.ErrStartWorkflow.WithFormat(in.GetWorkflowName(), err) a.Logger.Debug(err) return &runtimev1pb.StartWorkflowResponse{}, err } @@ -112,7 +112,7 @@ func (a *UniversalAPI) StartWorkflowBeta1(ctx context.Context, in *runtimev1pb.S // TerminateWorkflowBeta1 is the API handler for terminating a workflow func (a *UniversalAPI) TerminateWorkflowBeta1(ctx context.Context, in *runtimev1pb.TerminateWorkflowRequest) (*emptypb.Empty, error) { emptyResponse := &emptypb.Empty{} - if err := a.validateInstanceID(in.InstanceId, false /* isCreate */); err != nil { + if err := a.validateInstanceID(in.GetInstanceId(), false /* isCreate */); err != nil { a.Logger.Debug(err) return emptyResponse, err } @@ -120,20 +120,20 @@ func (a 
*UniversalAPI) TerminateWorkflowBeta1(ctx context.Context, in *runtimev1 // Workflow requires actors to be ready a.WaitForActorsReady(ctx) - workflowComponent, err := a.getWorkflowComponent(in.WorkflowComponent) + workflowComponent, err := a.getWorkflowComponent(in.GetWorkflowComponent()) if err != nil { a.Logger.Debug(err) return emptyResponse, err } req := &workflows.TerminateRequest{ - InstanceID: in.InstanceId, + InstanceID: in.GetInstanceId(), } if err := workflowComponent.Terminate(ctx, req); err != nil { if errors.Is(err, api.ErrInstanceNotFound) { - err = messages.ErrWorkflowInstanceNotFound.WithFormat(in.InstanceId, err) + err = messages.ErrWorkflowInstanceNotFound.WithFormat(in.GetInstanceId(), err) } else { - err = messages.ErrTerminateWorkflow.WithFormat(in.InstanceId, err) + err = messages.ErrTerminateWorkflow.WithFormat(in.GetInstanceId(), err) } a.Logger.Debug(err) return emptyResponse, err @@ -144,12 +144,12 @@ func (a *UniversalAPI) TerminateWorkflowBeta1(ctx context.Context, in *runtimev1 // RaiseEventWorkflowBeta1 is the API handler for raising an event to a workflow func (a *UniversalAPI) RaiseEventWorkflowBeta1(ctx context.Context, in *runtimev1pb.RaiseEventWorkflowRequest) (*emptypb.Empty, error) { emptyResponse := &emptypb.Empty{} - if err := a.validateInstanceID(in.InstanceId, false /* isCreate */); err != nil { + if err := a.validateInstanceID(in.GetInstanceId(), false /* isCreate */); err != nil { a.Logger.Debug(err) return emptyResponse, err } - if in.EventName == "" { + if in.GetEventName() == "" { err := messages.ErrMissingWorkflowEventName a.Logger.Debug(err) return emptyResponse, err @@ -158,21 +158,21 @@ func (a *UniversalAPI) RaiseEventWorkflowBeta1(ctx context.Context, in *runtimev // Workflow requires actors to be ready a.WaitForActorsReady(ctx) - workflowComponent, err := a.getWorkflowComponent(in.WorkflowComponent) + workflowComponent, err := a.getWorkflowComponent(in.GetWorkflowComponent()) if err != nil { a.Logger.Debug(err) return emptyResponse, err } req := workflows.RaiseEventRequest{ - InstanceID: in.InstanceId, - EventName: in.EventName, - EventData: in.EventData, + InstanceID: in.GetInstanceId(), + EventName: in.GetEventName(), + EventData: in.GetEventData(), } err = workflowComponent.RaiseEvent(ctx, &req) if err != nil { - err = messages.ErrRaiseEventWorkflow.WithFormat(in.InstanceId, err) + err = messages.ErrRaiseEventWorkflow.WithFormat(in.GetInstanceId(), err) a.Logger.Debug(err) return emptyResponse, err } @@ -182,7 +182,7 @@ func (a *UniversalAPI) RaiseEventWorkflowBeta1(ctx context.Context, in *runtimev // PauseWorkflowBeta1 is the API handler for pausing a workflow func (a *UniversalAPI) PauseWorkflowBeta1(ctx context.Context, in *runtimev1pb.PauseWorkflowRequest) (*emptypb.Empty, error) { emptyResponse := &emptypb.Empty{} - if err := a.validateInstanceID(in.InstanceId, false /* isCreate */); err != nil { + if err := a.validateInstanceID(in.GetInstanceId(), false /* isCreate */); err != nil { a.Logger.Debug(err) return emptyResponse, err } @@ -190,17 +190,17 @@ func (a *UniversalAPI) PauseWorkflowBeta1(ctx context.Context, in *runtimev1pb.P // Workflow requires actors to be ready a.WaitForActorsReady(ctx) - workflowComponent, err := a.getWorkflowComponent(in.WorkflowComponent) + workflowComponent, err := a.getWorkflowComponent(in.GetWorkflowComponent()) if err != nil { a.Logger.Debug(err) return emptyResponse, err } req := &workflows.PauseRequest{ - InstanceID: in.InstanceId, + InstanceID: in.GetInstanceId(), } if err := 
workflowComponent.Pause(ctx, req); err != nil { - err = messages.ErrPauseWorkflow.WithFormat(in.InstanceId, err) + err = messages.ErrPauseWorkflow.WithFormat(in.GetInstanceId(), err) a.Logger.Debug(err) return emptyResponse, err } @@ -210,7 +210,7 @@ func (a *UniversalAPI) PauseWorkflowBeta1(ctx context.Context, in *runtimev1pb.P // ResumeWorkflowBeta1 is the API handler for resuming a workflow func (a *UniversalAPI) ResumeWorkflowBeta1(ctx context.Context, in *runtimev1pb.ResumeWorkflowRequest) (*emptypb.Empty, error) { emptyResponse := &emptypb.Empty{} - if err := a.validateInstanceID(in.InstanceId, false /* isCreate */); err != nil { + if err := a.validateInstanceID(in.GetInstanceId(), false /* isCreate */); err != nil { a.Logger.Debug(err) return emptyResponse, err } @@ -218,17 +218,17 @@ func (a *UniversalAPI) ResumeWorkflowBeta1(ctx context.Context, in *runtimev1pb. // Workflow requires actors to be ready a.WaitForActorsReady(ctx) - workflowComponent, err := a.getWorkflowComponent(in.WorkflowComponent) + workflowComponent, err := a.getWorkflowComponent(in.GetWorkflowComponent()) if err != nil { a.Logger.Debug(err) return emptyResponse, err } req := &workflows.ResumeRequest{ - InstanceID: in.InstanceId, + InstanceID: in.GetInstanceId(), } if err := workflowComponent.Resume(ctx, req); err != nil { - err = messages.ErrResumeWorkflow.WithFormat(in.InstanceId, err) + err = messages.ErrResumeWorkflow.WithFormat(in.GetInstanceId(), err) a.Logger.Debug(err) return emptyResponse, err } @@ -238,7 +238,7 @@ func (a *UniversalAPI) ResumeWorkflowBeta1(ctx context.Context, in *runtimev1pb. // PurgeWorkflowBeta1 is the API handler for purging a workflow func (a *UniversalAPI) PurgeWorkflowBeta1(ctx context.Context, in *runtimev1pb.PurgeWorkflowRequest) (*emptypb.Empty, error) { emptyResponse := &emptypb.Empty{} - if err := a.validateInstanceID(in.InstanceId, false /* isCreate */); err != nil { + if err := a.validateInstanceID(in.GetInstanceId(), false /* isCreate */); err != nil { a.Logger.Debug(err) return emptyResponse, err } @@ -246,22 +246,22 @@ func (a *UniversalAPI) PurgeWorkflowBeta1(ctx context.Context, in *runtimev1pb.P // Workflow requires actors to be ready a.WaitForActorsReady(ctx) - workflowComponent, err := a.getWorkflowComponent(in.WorkflowComponent) + workflowComponent, err := a.getWorkflowComponent(in.GetWorkflowComponent()) if err != nil { a.Logger.Debug(err) return emptyResponse, err } req := workflows.PurgeRequest{ - InstanceID: in.InstanceId, + InstanceID: in.GetInstanceId(), } err = workflowComponent.Purge(ctx, &req) if err != nil { if errors.Is(err, api.ErrInstanceNotFound) { - err = messages.ErrWorkflowInstanceNotFound.WithFormat(in.InstanceId, err) + err = messages.ErrWorkflowInstanceNotFound.WithFormat(in.GetInstanceId(), err) } else { - err = messages.ErrPurgeWorkflow.WithFormat(in.InstanceId, err) + err = messages.ErrPurgeWorkflow.WithFormat(in.GetInstanceId(), err) } a.Logger.Debug(err) return emptyResponse, err diff --git a/pkg/grpc/universalapi/api_workflow_test.go b/pkg/grpc/universalapi/api_workflow_test.go index f5d1d020d1a..c929fda91b2 100644 --- a/pkg/grpc/universalapi/api_workflow_test.go +++ b/pkg/grpc/universalapi/api_workflow_test.go @@ -17,7 +17,7 @@ import ( "context" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/dapr/components-contrib/workflows" "github.com/dapr/dapr/pkg/messages" @@ -128,9 +128,9 @@ func TestStartWorkflowBeta1API(t *testing.T) { _, err := 
fakeAPI.StartWorkflowBeta1(context.Background(), req) if tt.expectedError == nil { - assert.NoError(t, err) - } else if assert.Error(t, err) { - assert.Equal(t, tt.expectedError, err) + require.NoError(t, err) + } else { + require.ErrorIs(t, err, tt.expectedError) } }) } @@ -201,9 +201,9 @@ func TestGetWorkflowBeta1API(t *testing.T) { _, err := fakeAPI.GetWorkflowBeta1(context.Background(), req) if tt.expectedError == nil { - assert.NoError(t, err) - } else if assert.Error(t, err) { - assert.Equal(t, tt.expectedError, err) + require.NoError(t, err) + } else { + require.ErrorIs(t, err, tt.expectedError) } }) } @@ -274,9 +274,9 @@ func TestTerminateWorkflowBeta1API(t *testing.T) { _, err := fakeAPI.TerminateWorkflowBeta1(context.Background(), req) if tt.expectedError == nil { - assert.NoError(t, err) - } else if assert.Error(t, err) { - assert.Equal(t, tt.expectedError, err) + require.NoError(t, err) + } else { + require.ErrorIs(t, err, tt.expectedError) } }) } @@ -364,9 +364,9 @@ func TestRaiseEventWorkflowBeta1Api(t *testing.T) { _, err := fakeAPI.RaiseEventWorkflowBeta1(context.Background(), req) if tt.expectedError == nil { - assert.NoError(t, err) - } else if assert.Error(t, err) { - assert.Equal(t, tt.expectedError, err) + require.NoError(t, err) + } else { + require.ErrorIs(t, err, tt.expectedError) } }) } @@ -437,9 +437,9 @@ func TestPauseWorkflowBeta1Api(t *testing.T) { _, err := fakeAPI.PauseWorkflowBeta1(context.Background(), req) if tt.expectedError == nil { - assert.NoError(t, err) - } else if assert.Error(t, err) { - assert.Equal(t, tt.expectedError, err) + require.NoError(t, err) + } else { + require.ErrorIs(t, err, tt.expectedError) } }) } @@ -510,9 +510,9 @@ func TestResumeWorkflowBeta1Api(t *testing.T) { _, err := fakeAPI.ResumeWorkflowBeta1(context.Background(), req) if tt.expectedError == nil { - assert.NoError(t, err) - } else if assert.Error(t, err) { - assert.Equal(t, tt.expectedError, err) + require.NoError(t, err) + } else { + require.ErrorIs(t, err, tt.expectedError) } }) } diff --git a/pkg/health/health_test.go b/pkg/health/health_test.go index 28a7c75f264..444d465986a 100644 --- a/pkg/health/health_test.go +++ b/pkg/health/health_test.go @@ -155,11 +155,11 @@ func TestApplyOptions(t *testing.T) { opts := healthCheckOptions{} applyDefaults(&opts) - assert.Equal(t, opts.failureThreshold, int32(failureThreshold)) - assert.Equal(t, opts.initialDelay, initialDelay) - assert.Equal(t, opts.interval, interval) - assert.Equal(t, opts.requestTimeout, requestTimeout) - assert.Equal(t, opts.successStatusCode, successStatusCode) + assert.Equal(t, int32(failureThreshold), opts.failureThreshold) + assert.Equal(t, initialDelay, opts.initialDelay) + assert.Equal(t, interval, opts.interval) + assert.Equal(t, requestTimeout, opts.requestTimeout) + assert.Equal(t, successStatusCode, opts.successStatusCode) }) t.Run("valid custom options", func(t *testing.T) { @@ -176,11 +176,11 @@ func TestApplyOptions(t *testing.T) { for _, o := range customOpts { o(&opts) } - assert.Equal(t, opts.failureThreshold, int32(10)) - assert.Equal(t, opts.initialDelay, time.Second*11) - assert.Equal(t, opts.interval, time.Second*12) - assert.Equal(t, opts.requestTimeout, time.Second*13) - assert.Equal(t, opts.successStatusCode, 201) + assert.Equal(t, int32(10), opts.failureThreshold) + assert.Equal(t, time.Second*11, opts.initialDelay) + assert.Equal(t, time.Second*12, opts.interval) + assert.Equal(t, time.Second*13, opts.requestTimeout) + assert.Equal(t, 201, opts.successStatusCode) }) } diff --git 
a/pkg/http/api.go b/pkg/http/api.go index 4c2380cbff0..f39a61e5c3d 100644 --- a/pkg/http/api.go +++ b/pkg/http/api.go @@ -809,8 +809,8 @@ type subscribeConfigurationResponse struct { } type UnsubscribeConfigurationResponse struct { - Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Ok bool `json:"ok,omitempty" protobuf:"varint,1,opt,name=ok,proto3"` + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message,proto3"` } type configurationEventHandler struct { @@ -852,8 +852,8 @@ func (h *configurationEventHandler) updateEventHandler(ctx context.Context, e *c defer rResp.Close() } - if rResp != nil && rResp.Status().Code != nethttp.StatusOK { - return struct{}{}, fmt.Errorf("error sending configuration item to application, status %d", rResp.Status().Code) + if rResp != nil && rResp.Status().GetCode() != nethttp.StatusOK { + return struct{}{}, fmt.Errorf("error sending configuration item to application, status %d", rResp.Status().GetCode()) } return struct{}{}, nil }) @@ -1501,7 +1501,7 @@ func (a *api) onDirectActorMessage(reqCtx *fasthttp.RequestCtx) { reqCtx.Response.Header.SetContentType(resp.ContentType()) // Construct response. - statusCode := int(resp.Status().Code) + statusCode := int(resp.Status().GetCode()) if !resp.IsHTTPResponse() { statusCode = invokev1.HTTPStatusFromCode(codes.Code(statusCode)) } @@ -2095,23 +2095,23 @@ func (a *api) onQueryStateHandler() nethttp.HandlerFunc { }, OutModifier: func(out *runtimev1pb.QueryStateResponse) (any, error) { // If the response is empty, return nil - if out == nil || len(out.Results) == 0 { + if out == nil || len(out.GetResults()) == 0 { return nil, nil } // We need to translate this to a JSON object because one of the fields must be returned as json.RawMessage qresp := &QueryResponse{ - Results: make([]QueryItem, len(out.Results)), - Token: out.Token, - Metadata: out.Metadata, + Results: make([]QueryItem, len(out.GetResults())), + Token: out.GetToken(), + Metadata: out.GetMetadata(), } - for i := range out.Results { - qresp.Results[i].Key = stateLoader.GetOriginalStateKey(out.Results[i].Key) - if out.Results[i].Etag != "" { + for i := range out.GetResults() { + qresp.Results[i].Key = stateLoader.GetOriginalStateKey(out.GetResults()[i].GetKey()) + if out.GetResults()[i].GetEtag() != "" { qresp.Results[i].ETag = &out.Results[i].Etag } - qresp.Results[i].Error = out.Results[i].Error - qresp.Results[i].Data = json.RawMessage(out.Results[i].Data) + qresp.Results[i].Error = out.GetResults()[i].GetError() + qresp.Results[i].Data = json.RawMessage(out.GetResults()[i].GetData()) } return qresp, nil }, diff --git a/pkg/http/api_directmessaging.go b/pkg/http/api_directmessaging.go index 494090bb5d8..8cba26e90f4 100644 --- a/pkg/http/api_directmessaging.go +++ b/pkg/http/api_directmessaging.go @@ -171,7 +171,7 @@ func (a *api) onDirectMessage(w http.ResponseWriter, r *http.Request) { // Construct response if not HTTP resStatus := rResp.Status() if !rResp.IsHTTPResponse() { - statusCode := int32(invokev1.HTTPStatusFromCode(codes.Code(resStatus.Code))) + statusCode := int32(invokev1.HTTPStatusFromCode(codes.Code(resStatus.GetCode()))) if statusCode != http.StatusOK { // Close the response to replace the body _ = rResp.Close() @@ -188,12 +188,12 @@ func (a *api) onDirectMessage(w http.ResponseWriter, r *http.Request) { } else { resStatus.Code = statusCode } - } else if resStatus.Code < 200 || resStatus.Code > 399 { + } else if 
resStatus.GetCode() < 200 || resStatus.GetCode() > 399 { msg, _ := rResp.RawDataFull() // Returning a `codeError` here will cause Resiliency to retry the request (if retries are enabled), but if the request continues to fail, the response is sent to the user with whatever status code the app returned. return rResp, codeError{ headers: rResp.Headers(), - statusCode: int(resStatus.Code), + statusCode: int(resStatus.GetCode()), msg: msg, contentType: rResp.ContentType(), } @@ -245,7 +245,7 @@ func (a *api) onDirectMessage(w http.ResponseWriter, r *http.Request) { } defer resp.Close() - statusCode := int(resp.Status().Code) + statusCode := int(resp.Status().GetCode()) if ct := resp.ContentType(); ct != "" { w.Header().Set("content-type", ct) diff --git a/pkg/http/api_directmessaging_test.go b/pkg/http/api_directmessaging_test.go index ef54be07982..9c46c2b720f 100644 --- a/pkg/http/api_directmessaging_test.go +++ b/pkg/http/api_directmessaging_test.go @@ -131,10 +131,10 @@ func TestV1DirectMessagingEndpoints(t *testing.T) { }), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { msg := req.Message() - if msg.Method != "fakeMethod" { + if msg.GetMethod() != "fakeMethod" { return false } - if msg.HttpExtension.Verb != commonv1.HTTPExtension_POST { + if msg.GetHttpExtension().GetVerb() != commonv1.HTTPExtension_POST { return false } return true @@ -464,7 +464,7 @@ func TestV1DirectMessagingEndpoints(t *testing.T) { } msg := imr.Message() - if msg.Method != "fakeMethod" { + if msg.GetMethod() != "fakeMethod" { return false } @@ -509,7 +509,7 @@ func TestV1DirectMessagingEndpoints(t *testing.T) { } msg := imr.Message() - if msg.Method != "fakeMethod" { + if msg.GetMethod() != "fakeMethod" { return false } @@ -826,7 +826,7 @@ func TestV1DirectMessagingEndpointsWithTracer(t *testing.T) { } msg := imr.Message() - if msg.Method != "fakeMethod" { + if msg.GetMethod() != "fakeMethod" { return false } @@ -867,7 +867,7 @@ func TestV1DirectMessagingEndpointsWithTracer(t *testing.T) { } msg := imr.Message() - if msg.Method != "fakeMethod" { + if msg.GetMethod() != "fakeMethod" { return false } diff --git a/pkg/http/api_metadata.go b/pkg/http/api_metadata.go index aa48f64953d..8e1b8b31b70 100644 --- a/pkg/http/api_metadata.go +++ b/pkg/http/api_metadata.go @@ -64,50 +64,50 @@ func (a *api) onGetMetadata() http.HandlerFunc { // In the protos, the property subscriptions[*].rules is serialized as subscriptions[*].rules.rules // To maintain backwards-compatibility, we need to copy into a custom struct and marshal that instead res := &metadataResponse{ - ID: out.Id, - Extended: out.ExtendedMetadata, + ID: out.GetId(), + Extended: out.GetExtendedMetadata(), // We can embed the proto object directly only for as long as the protojson key is == json key - ActiveActorsCount: out.ActiveActorsCount, //nolint:staticcheck - RegisteredComponents: out.RegisteredComponents, - HTTPEndpoints: out.HttpEndpoints, - RuntimeVersion: out.RuntimeVersion, - EnabledFeatures: out.EnabledFeatures, + ActiveActorsCount: out.GetActiveActorsCount(), //nolint:staticcheck + RegisteredComponents: out.GetRegisteredComponents(), + HTTPEndpoints: out.GetHttpEndpoints(), + RuntimeVersion: out.GetRuntimeVersion(), + EnabledFeatures: out.GetEnabledFeatures(), } // Copy the app connection properties into a custom struct // See https://github.com/golang/protobuf/issues/256 res.AppConnectionProperties = metadataResponseAppConnectionProperties{ - Port: out.AppConnectionProperties.Port, - Protocol: out.AppConnectionProperties.Protocol, - 
ChannelAddress: out.AppConnectionProperties.ChannelAddress, - MaxConcurrency: out.AppConnectionProperties.MaxConcurrency, + Port: out.GetAppConnectionProperties().GetPort(), + Protocol: out.GetAppConnectionProperties().GetProtocol(), + ChannelAddress: out.GetAppConnectionProperties().GetChannelAddress(), + MaxConcurrency: out.GetAppConnectionProperties().GetMaxConcurrency(), } - if out.AppConnectionProperties.Health != nil { + if out.GetAppConnectionProperties().GetHealth() != nil { res.AppConnectionProperties.Health = &metadataResponseAppConnectionHealthProperties{ - HealthCheckPath: out.AppConnectionProperties.Health.HealthCheckPath, - HealthProbeInterval: out.AppConnectionProperties.Health.HealthProbeInterval, - HealthProbeTimeout: out.AppConnectionProperties.Health.HealthProbeTimeout, - HealthThreshold: out.AppConnectionProperties.Health.HealthThreshold, + HealthCheckPath: out.GetAppConnectionProperties().GetHealth().GetHealthCheckPath(), + HealthProbeInterval: out.GetAppConnectionProperties().GetHealth().GetHealthProbeInterval(), + HealthProbeTimeout: out.GetAppConnectionProperties().GetHealth().GetHealthProbeTimeout(), + HealthThreshold: out.GetAppConnectionProperties().GetHealth().GetHealthThreshold(), } } // Copy the subscriptions into a custom struct - if len(out.Subscriptions) > 0 { - subs := make([]metadataResponsePubsubSubscription, len(out.Subscriptions)) - for i, v := range out.Subscriptions { + if len(out.GetSubscriptions()) > 0 { + subs := make([]metadataResponsePubsubSubscription, len(out.GetSubscriptions())) + for i, v := range out.GetSubscriptions() { subs[i] = metadataResponsePubsubSubscription{ - PubsubName: v.PubsubName, - Topic: v.Topic, - Metadata: v.Metadata, - DeadLetterTopic: v.DeadLetterTopic, + PubsubName: v.GetPubsubName(), + Topic: v.GetTopic(), + Metadata: v.GetMetadata(), + DeadLetterTopic: v.GetDeadLetterTopic(), } - if v.Rules != nil && len(v.Rules.Rules) > 0 { - subs[i].Rules = make([]metadataResponsePubsubSubscriptionRule, len(v.Rules.Rules)) - for j, r := range v.Rules.Rules { + if v.GetRules() != nil && len(v.GetRules().GetRules()) > 0 { + subs[i].Rules = make([]metadataResponsePubsubSubscriptionRule, len(v.GetRules().GetRules())) + for j, r := range v.GetRules().GetRules() { subs[i].Rules[j] = metadataResponsePubsubSubscriptionRule{ - Match: r.Match, - Path: r.Path, + Match: r.GetMatch(), + Path: r.GetPath(), } } } diff --git a/pkg/http/api_secrets.go b/pkg/http/api_secrets.go index 468638dc84a..218e9444a64 100644 --- a/pkg/http/api_secrets.go +++ b/pkg/http/api_secrets.go @@ -73,12 +73,12 @@ func (a *api) onGetSecretHandler() http.HandlerFunc { }, OutModifier: func(out *runtimev1pb.GetSecretResponse) (any, error) { // If the data is nil, return nil - if out == nil || out.Data == nil { + if out == nil || out.GetData() == nil { return nil, nil } // Return just the data property - return out.Data, nil + return out.GetData(), nil }, }, ) @@ -95,15 +95,15 @@ func (a *api) onBulkGetSecretHandler() http.HandlerFunc { }, OutModifier: func(out *runtimev1pb.GetBulkSecretResponse) (any, error) { // If the data is nil, return nil - if out == nil || out.Data == nil { + if out == nil || out.GetData() == nil { return nil, nil } var secrets map[string]map[string]string - secrets = make(map[string]map[string]string, len(out.Data)) + secrets = make(map[string]map[string]string, len(out.GetData())) // Return just the secrets as map - for secretKey, secret := range out.Data { - secrets[secretKey] = secret.Secrets + for secretKey, secret := range out.GetData() { + 
secrets[secretKey] = secret.GetSecrets() } return secrets, nil diff --git a/pkg/http/api_test.go b/pkg/http/api_test.go index df946e2cd74..65359dc8008 100644 --- a/pkg/http/api_test.go +++ b/pkg/http/api_test.go @@ -455,7 +455,7 @@ func TestBulkPubSubEndpoints(t *testing.T) { assert.Equal(t, "ERR_PUBSUB_PUBLISH_MESSAGE", resp.ErrorBody["errorCode"]) bulkResp := BulkPublishResponse{} - assert.NoError(t, json.Unmarshal(resp.RawBody, &bulkResp)) + require.NoError(t, json.Unmarshal(resp.RawBody, &bulkResp)) assert.Equal(t, len(errBulkResponse.FailedEntries), len(bulkResp.FailedEntries)) for i, entry := range bulkResp.FailedEntries { assert.Equal(t, errBulkResponse.FailedEntries[i].EntryId, entry.EntryId) @@ -503,7 +503,7 @@ func TestBulkPubSubEndpoints(t *testing.T) { assert.Equal(t, "ERR_PUBSUB_PUBLISH_MESSAGE", resp.ErrorBody["errorCode"]) bulkResp := BulkPublishResponse{} - assert.NoError(t, json.Unmarshal(resp.RawBody, &bulkResp)) + require.NoError(t, json.Unmarshal(resp.RawBody, &bulkResp)) assert.Equal(t, len(errBulkResponse.FailedEntries), len(bulkResp.FailedEntries)) for i, entry := range bulkResp.FailedEntries { assert.Equal(t, errBulkResponse.FailedEntries[i].EntryId, entry.EntryId) @@ -832,7 +832,7 @@ func TestGetMetadataFromRequest(t *testing.T) { // assert assert.NotEmpty(t, m, "expected map to be populated") - assert.Equal(t, 1, len(m), "expected length to match") + assert.Len(t, m, 1, "expected length to match") assert.Equal(t, "test", m["test"], "test", "expected value to be equal") } @@ -847,7 +847,7 @@ func TestGetMetadataFromFastHTTPRequest(t *testing.T) { // assert assert.NotEmpty(t, m, "expected map to be populated") - assert.Equal(t, 1, len(m), "expected length to match") + assert.Len(t, m, 1, "expected length to match") assert.Equal(t, "test", m["test"], "test", "expected value to be equal") }) } @@ -1207,7 +1207,7 @@ func TestV1ActorEndpoints(t *testing.T) { // act inputBodyBytes, err := json.Marshal(testTransactionalOperations) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -1247,7 +1247,7 @@ func TestV1ActorEndpoints(t *testing.T) { // act inputBodyBytes, err := json.Marshal(testTransactionalOperations) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -1293,7 +1293,7 @@ func TestV1ActorEndpoints(t *testing.T) { // act inputBodyBytes, err := json.Marshal(testTransactionalOperations) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -1324,7 +1324,7 @@ func TestV1ActorEndpoints(t *testing.T) { // act inputBodyBytes, err := json.Marshal(reminderRequest) - assert.NoError(t, err) + require.NoError(t, err) for _, method := range []string{"POST", "PUT"} { resp := fakeServer.DoRequest(method, apiPath, inputBodyBytes, nil) assert.Equal(t, 204, resp.StatusCode) @@ -1354,7 +1354,7 @@ func TestV1ActorEndpoints(t *testing.T) { // act inputBodyBytes, err := json.Marshal(reminderRequest) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -1560,7 +1560,7 @@ func TestV1ActorEndpoints(t *testing.T) { // act inputBodyBytes, err := json.Marshal(timerRequest) - assert.NoError(t, err) + require.NoError(t, err) for _, method := range []string{"POST", "PUT"} { resp := fakeServer.DoRequest(method, apiPath, inputBodyBytes, nil) assert.Equal(t, 204, resp.StatusCode) @@ 
-1590,7 +1590,7 @@ func TestV1ActorEndpoints(t *testing.T) { // act inputBodyBytes, err := json.Marshal(timerRequest) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) assert.Equal(t, 500, resp.StatusCode) assert.Equal(t, "ERR_ACTOR_TIMER_CREATE", resp.ErrorBody["errorCode"]) @@ -1658,11 +1658,11 @@ func TestV1ActorEndpoints(t *testing.T) { return false } - if m.Actor == nil || m.Actor.ActorType != "fakeActorType" || m.Actor.ActorId != "fakeActorID" { + if m.GetActor() == nil || m.GetActor().GetActorType() != "fakeActorType" || m.GetActor().GetActorId() != "fakeActorID" { return false } - if m.Message == nil || m.Message.Data == nil || len(m.Message.Data.Value) == 0 || !bytes.Equal(m.Message.Data.Value, fakeData) { + if m.GetMessage() == nil || m.GetMessage().GetData() == nil || len(m.GetMessage().GetData().GetValue()) == 0 || !bytes.Equal(m.GetMessage().GetData().GetValue(), fakeData) { return false } return true @@ -1687,11 +1687,11 @@ func TestV1ActorEndpoints(t *testing.T) { return false } - if m.Actor == nil || m.Actor.ActorType != "fakeActorType" || m.Actor.ActorId != "fakeActorID" { + if m.GetActor() == nil || m.GetActor().GetActorType() != "fakeActorType" || m.GetActor().GetActorId() != "fakeActorID" { return false } - if m.Message == nil || m.Message.Data == nil || len(m.Message.Data.Value) == 0 || !bytes.Equal(m.Message.Data.Value, []byte("fakeData")) { + if m.GetMessage() == nil || m.GetMessage().GetData() == nil || len(m.GetMessage().GetData().GetValue()) == 0 || !bytes.Equal(m.GetMessage().GetData().GetValue(), []byte("fakeData")) { return false } return true @@ -1983,7 +1983,7 @@ func TestV1ActorEndpointsWithTracer(t *testing.T) { // act inputBodyBytes, err := json.Marshal(testTransactionalOperations) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -2194,7 +2194,7 @@ func TestConfigurationGet(t *testing.T) { // assert assert.NotNil(t, resp.JSONBody) - assert.Equal(t, 1, len(resp.JSONBody.(map[string]interface{}))) + assert.Len(t, resp.JSONBody.(map[string]interface{}), 1) rspMap := resp.JSONBody.(map[string]interface{}) assert.NotNil(t, rspMap) assert.Contains(t, rspMap, "good-key1") @@ -2212,7 +2212,7 @@ func TestConfigurationGet(t *testing.T) { // assert assert.NotNil(t, resp.JSONBody) - assert.Equal(t, 1, len(resp.JSONBody.(map[string]interface{}))) + assert.Len(t, resp.JSONBody.(map[string]interface{}), 1) rspMap := resp.JSONBody.(map[string]interface{}) assert.NotNil(t, rspMap) assert.Contains(t, rspMap, "good-key1") @@ -2229,7 +2229,7 @@ func TestConfigurationGet(t *testing.T) { // assert assert.Equal(t, 200, resp.StatusCode, "Accessing configuration store with good keys should return 200") assert.NotNil(t, resp.JSONBody) - assert.Equal(t, 2, len(resp.JSONBody.(map[string]interface{}))) + assert.Len(t, resp.JSONBody.(map[string]interface{}), 2) rspMap1 := resp.JSONBody.(map[string]interface{}) assert.NotNil(t, rspMap1) assert.Contains(t, rspMap1, "good-key1") @@ -2255,7 +2255,7 @@ func TestConfigurationGet(t *testing.T) { // assert assert.Equal(t, 200, resp.StatusCode, "Accessing configuration store with good keys should return 200") assert.NotNil(t, resp.JSONBody) - assert.Equal(t, 2, len(resp.JSONBody.(map[string]interface{}))) + assert.Len(t, resp.JSONBody.(map[string]interface{}), 2) rspMap1 := resp.JSONBody.(map[string]interface{}) assert.NotNil(t, rspMap1) assert.Contains(t, rspMap1, "good-key1") @@ -2283,7 +2283,7 
@@ func TestConfigurationGet(t *testing.T) { // assert assert.NotNil(t, resp.JSONBody) - assert.Equal(t, 2, len(resp.JSONBody.(map[string]interface{}))) + assert.Len(t, resp.JSONBody.(map[string]interface{}), 2) rspMap1 := resp.JSONBody.(map[string]interface{}) assert.NotNil(t, rspMap1) assert.Contains(t, rspMap1, "good-key1") @@ -2311,7 +2311,7 @@ func TestConfigurationGet(t *testing.T) { // assert assert.NotNil(t, resp.JSONBody) - assert.Equal(t, 2, len(resp.JSONBody.(map[string]interface{}))) + assert.Len(t, resp.JSONBody.(map[string]interface{}), 2) rspMap1 := resp.JSONBody.(map[string]interface{}) assert.NotNil(t, rspMap1) assert.Contains(t, rspMap1, "good-key1") @@ -2399,7 +2399,7 @@ func TestV1Alpha1ConfigurationUnsubscribe(t *testing.T) { assert.Nil(t, rspMap1) uuid, err := uuid.NewRandom() - assert.Nil(t, err, "unable to generate id") + require.NoError(t, err, "unable to generate id") apiPath2 := fmt.Sprintf("v1.0-alpha1/configuration/%s/%s/unsubscribe", storeName, &uuid) resp2 := fakeServer.DoRequest("GET", apiPath2, nil, nil) @@ -2416,7 +2416,7 @@ func TestV1Alpha1ConfigurationUnsubscribe(t *testing.T) { assert.Nil(t, rspMap1) uuid, err := uuid.NewRandom() - assert.Nil(t, err, "unable to generate id") + require.NoError(t, err, "unable to generate id") apiPath2 := fmt.Sprintf("v1.0/configuration/%s/%s/unsubscribe", storeName, &uuid) resp2 := fakeServer.DoRequest("GET", apiPath2, nil, nil) @@ -2432,7 +2432,7 @@ func TestV1Alpha1ConfigurationUnsubscribe(t *testing.T) { assert.Nil(t, rspMap1) uuid, err := uuid.NewRandom() - assert.NoError(t, err, "unable to generate id") + require.NoError(t, err, "unable to generate id") apiPath2 := fmt.Sprintf("v1.0-alpha1/configuration/%s/%s/unsubscribe", "", &uuid) resp2 := fakeServer.DoRequest("GET", apiPath2, nil, nil) @@ -2448,7 +2448,7 @@ func TestV1Alpha1ConfigurationUnsubscribe(t *testing.T) { assert.Nil(t, rspMap1) uuid, err := uuid.NewRandom() - assert.NoError(t, err, "unable to generate id") + require.NoError(t, err, "unable to generate id") apiPath2 := fmt.Sprintf("v1.0/configuration/%s/%s/unsubscribe", "", &uuid) resp2 := fakeServer.DoRequest("GET", apiPath2, nil, nil) @@ -2510,7 +2510,7 @@ func TestV1Alpha1DistributedLock(t *testing.T) { assert.NotNil(t, resp.JSONBody) rspMap := resp.JSONBody.(map[string]interface{}) assert.NotNil(t, rspMap) - assert.Equal(t, true, rspMap["success"].(bool)) + assert.True(t, rspMap["success"].(bool)) }) t.Run("Lock with invalid resource id", func(t *testing.T) { @@ -2781,11 +2781,11 @@ func TestV1Beta1Workflow(t *testing.T) { assert.Contains(t, rspMap, "createdAt") createdAtStr := rspMap["createdAt"].(string) _, err := time.Parse(time.RFC3339, createdAtStr) // we expect timestamps to be in RFC3339 format - assert.NoError(t, err) + require.NoError(t, err) assert.Contains(t, rspMap, "lastUpdatedAt") lastUpdatedAtStr := rspMap["lastUpdatedAt"].(string) _, err = time.Parse(time.RFC3339, lastUpdatedAtStr) // we expect timestamps to be in RFC3339 format - assert.NoError(t, err) + require.NoError(t, err) }) ///////////////////////// @@ -3530,7 +3530,7 @@ func TestV1StateEndpoints(t *testing.T) { var responses []BulkGetResponse - assert.NoError(t, json.Unmarshal(resp.RawBody, &responses), "Response should be valid JSON") + require.NoError(t, json.Unmarshal(resp.RawBody, &responses), "Response should be valid JSON") expectedResponses := []BulkGetResponse{ { @@ -3563,7 +3563,7 @@ func TestV1StateEndpoints(t *testing.T) { var responses []BulkGetResponse - assert.NoError(t, json.Unmarshal(resp.RawBody, 
&responses), "Response should be valid JSON") + require.NoError(t, json.Unmarshal(resp.RawBody, &responses), "Response should be valid JSON") expectedResponses := []BulkGetResponse{ { @@ -4449,7 +4449,7 @@ func TestV1TransactionEndpoints(t *testing.T) { Operations: testTransactionalOperations, }) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -4479,7 +4479,7 @@ func TestV1TransactionEndpoints(t *testing.T) { inputBodyBytes, err := json.Marshal(stateTransactionRequestBody{ Operations: testTransactionalOperations, }) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert assert.Equal(t, 400, resp.StatusCode, "Accessing non-existent state store should return 400") @@ -4502,7 +4502,7 @@ func TestV1TransactionEndpoints(t *testing.T) { Operations: testTransactionalOperations, }) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -4529,7 +4529,7 @@ func TestV1TransactionEndpoints(t *testing.T) { Operations: testTransactionalOperations, }) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -4556,7 +4556,7 @@ func TestV1TransactionEndpoints(t *testing.T) { Operations: testTransactionalOperations, }) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -4581,7 +4581,7 @@ func TestV1TransactionEndpoints(t *testing.T) { Operations: testTransactionalOperations, }) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -4615,7 +4615,7 @@ func TestV1TransactionEndpoints(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) resp := fakeServer.DoRequest("POST", apiPath, inputBodyBytes, nil) // assert @@ -4661,7 +4661,7 @@ func TestStateStoreErrors(t *testing.T) { err := state.NewETagError(state.ETagMismatch, errors.New("error")) e, c, m := a.etagError(err) - assert.Equal(t, true, e) + assert.True(t, e) assert.Equal(t, 409, c) assert.Equal(t, "possible etag mismatch. error from state store: error", m) }) @@ -4671,7 +4671,7 @@ func TestStateStoreErrors(t *testing.T) { err := state.NewETagError(state.ETagInvalid, errors.New("error")) e, c, m := a.etagError(err) - assert.Equal(t, true, e) + assert.True(t, e) assert.Equal(t, 400, c) assert.Equal(t, "invalid etag value: error", m) }) diff --git a/pkg/http/api_workflow.go b/pkg/http/api_workflow.go index d42218b727c..30b6a46dc3b 100644 --- a/pkg/http/api_workflow.go +++ b/pkg/http/api_workflow.go @@ -203,7 +203,7 @@ func (a *api) onStartWorkflowHandler() http.HandlerFunc { // The instance ID is optional. If not specified, we generate a random one. 
in.InstanceId = r.URL.Query().Get(instanceID) - if in.InstanceId == "" { + if in.GetInstanceId() == "" { randomID, err := uuid.NewRandom() if err != nil { return nil, err diff --git a/pkg/http/server_test.go b/pkg/http/server_test.go index 0725db0b630..f13df489e19 100644 --- a/pkg/http/server_test.go +++ b/pkg/http/server_test.go @@ -245,7 +245,7 @@ func TestUnescapeRequestParametersHandler(t *testing.T) { chiCtx.URLParams.Add(parameter["parameterName"], parameter["parameterValue"]) err := srv.unespaceRequestParametersInContext(chiCtx) require.Error(t, err) - assert.ErrorContains(t, err, "failed to unescape request parameter") + require.ErrorContains(t, err, "failed to unescape request parameter") } }) } @@ -322,7 +322,7 @@ func TestAPILogging(t *testing.T) { timeStr, ok := logData["time"].(string) assert.True(t, ok) tt, err := time.Parse(time.RFC3339Nano, timeStr) - assert.NoError(t, err) + require.NoError(t, err) assert.InDelta(t, time.Now().Unix(), tt.Unix(), 120) // In our test the duration better be no more than 10ms! @@ -453,7 +453,7 @@ func TestClose(t *testing.T) { }) require.NoError(t, server.StartNonBlocking()) dapr_testing.WaitForListeningAddress(t, 5*time.Second, fmt.Sprintf("127.0.0.1:%d", port)) - assert.NoError(t, server.Close()) + require.NoError(t, server.Close()) }) t.Run("test close with api logging disabled", func(t *testing.T) { @@ -479,6 +479,6 @@ func TestClose(t *testing.T) { }) require.NoError(t, server.StartNonBlocking()) dapr_testing.WaitForListeningAddress(t, 5*time.Second, fmt.Sprintf("127.0.0.1:%d", port)) - assert.NoError(t, server.Close()) + require.NoError(t, server.Close()) }) } diff --git a/pkg/http/universalapi_test.go b/pkg/http/universalapi_test.go index 2136669621d..39162c768fd 100644 --- a/pkg/http/universalapi_test.go +++ b/pkg/http/universalapi_test.go @@ -186,7 +186,7 @@ func TestUniversalHTTPHandler(t *testing.T) { respBody, err := io.ReadAll(resp.Body) require.NoError(t, err) - assert.Len(t, respBody, 0) + assert.Empty(t, respBody) }) t.Run("Option SuccessStatusCode", func(t *testing.T) { @@ -319,7 +319,7 @@ func TestUniversalHTTPHandler(t *testing.T) { respBody, err := io.ReadAll(resp.Body) require.NoError(t, err) - assert.Len(t, respBody, 0) + assert.Empty(t, respBody) }) t.Run("Option OutModifier returns JSON data", func(t *testing.T) { diff --git a/pkg/http/util_test.go b/pkg/http/util_test.go index 53aad646e30..afbdf57d495 100644 --- a/pkg/http/util_test.go +++ b/pkg/http/util_test.go @@ -15,7 +15,6 @@ package http import ( "encoding/json" - "errors" "testing" "github.com/stretchr/testify/assert" @@ -27,20 +26,20 @@ func TestConvertEventToBytes(t *testing.T) { res, err := ConvertEventToBytes(map[string]string{ "test": "event", }, "application/octet-stream") - assert.Error(t, err) - assert.True(t, errors.Is(err, errContentTypeMismatch)) + require.Error(t, err) + require.ErrorIs(t, err, errContentTypeMismatch) assert.Equal(t, []byte{}, res) }) t.Run("serialize base64 bin to bytes proper content type", func(t *testing.T) { res, err := ConvertEventToBytes("dGVzdCBldmVudA==", "application/octet-stream") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte("test event"), res) }) t.Run("serialize string data with octet-stream content type", func(t *testing.T) { res, err := ConvertEventToBytes("test event", "application/octet-stream") - assert.Error(t, err) + require.Error(t, err) t.Log(err) assert.Equal(t, []byte{}, res) }) @@ -49,34 +48,34 @@ func TestConvertEventToBytes(t *testing.T) { res, err := 
ConvertEventToBytes(map[string]string{ "test": "event", }, "text/plain") - assert.Error(t, err) - assert.True(t, errors.Is(err, errContentTypeMismatch)) + require.Error(t, err) + require.ErrorIs(t, err, errContentTypeMismatch) assert.Equal(t, []byte{}, res) }) t.Run("serialize string data with application/json content type", func(t *testing.T) { res, err := ConvertEventToBytes("test/plain", "application/json") - assert.NoError(t, err) + require.NoError(t, err) // escape quotes assert.Equal(t, []byte("\"test/plain\""), res) }) t.Run("serialize string data with text/plain content type", func(t *testing.T) { res, err := ConvertEventToBytes("test event", "text/plain") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte("test event"), res) }) t.Run("serialize string data with application/xml content type", func(t *testing.T) { res, err := ConvertEventToBytes("", "text/plain") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte(""), res) }) t.Run("serialize string data with wrong content type", func(t *testing.T) { res, err := ConvertEventToBytes("", "image/png") - assert.Error(t, err) - assert.True(t, errors.Is(err, errContentTypeMismatch)) + require.Error(t, err) + require.ErrorIs(t, err, errContentTypeMismatch) assert.Equal(t, []byte{}, res) }) @@ -87,7 +86,7 @@ func TestConvertEventToBytes(t *testing.T) { exp, err := json.Marshal(event) require.NoError(t, err, "expected no error here") res, err := ConvertEventToBytes(event, "application/json") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, exp, res) }) @@ -106,7 +105,7 @@ func TestConvertEventToBytes(t *testing.T) { exp, err := json.Marshal(event) require.NoError(t, err, "expected no error here") res, err := ConvertEventToBytes(event, "application/cloudevents+json") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, exp, res) }) } diff --git a/pkg/injector/namespacednamematcher/namenamespacematcher_test.go b/pkg/injector/namespacednamematcher/namenamespacematcher_test.go index 06edd8de8c2..aad7223dad7 100644 --- a/pkg/injector/namespacednamematcher/namenamespacematcher_test.go +++ b/pkg/injector/namespacednamematcher/namenamespacematcher_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -153,13 +154,10 @@ func TestGetNameNamespaces(t *testing.T) { t.Run(tc.name, func(t *testing.T) { matcher, err := CreateFromString(tc.s) if tc.wantError { - if assert.Error(t, err, "expecting error but did not get it") { - return - } + require.Error(t, err, "expecting error but did not get it") + return } else { - if !assert.NoError(t, err, "not expecting error to happen") { - return - } + require.NoError(t, err, "not expecting error to happen") } assert.Equalf(t, tc.wantPrefixed, matcher.prefixed, "CreateFromString(%v)", tc.s) assert.Equalf(t, tc.wantEqual, matcher.equal, "CreateFromString(%v)", tc.s) @@ -256,7 +254,7 @@ func TestEqualPrefixNameNamespaceMatcherMatchesObject(t *testing.T) { for _, tc := range tests { matcher, err := CreateFromString(tc.namespaceNames) if tc.wantError { - assert.Error(t, err, "expecting error") + require.Error(t, err, "expecting error") continue } sa := &corev1.ServiceAccount{ diff --git a/pkg/injector/patcher/sidecar.go b/pkg/injector/patcher/sidecar.go index 6bc9d2df439..408aa035887 100644 --- a/pkg/injector/patcher/sidecar.go +++ b/pkg/injector/patcher/sidecar.go @@ -59,48 +59,48 @@ type 
SidecarConfig struct { Enabled bool `annotation:"dapr.io/enabled"` AppPort int32 `annotation:"dapr.io/app-port"` Config string `annotation:"dapr.io/config"` - AppProtocol string `annotation:"dapr.io/app-protocol" default:"http"` + AppProtocol string `annotation:"dapr.io/app-protocol" default:"http"` AppSSL bool `annotation:"dapr.io/app-ssl"` // TODO: Deprecated in Dapr 1.11; remove in a future Dapr version AppID string `annotation:"dapr.io/app-id"` EnableProfiling bool `annotation:"dapr.io/enable-profiling"` - LogLevel string `annotation:"dapr.io/log-level" default:"info"` + LogLevel string `annotation:"dapr.io/log-level" default:"info"` APITokenSecret string `annotation:"dapr.io/api-token-secret"` AppTokenSecret string `annotation:"dapr.io/app-token-secret"` LogAsJSON bool `annotation:"dapr.io/log-as-json"` AppMaxConcurrency *int `annotation:"dapr.io/app-max-concurrency"` - EnableMetrics bool `annotation:"dapr.io/enable-metrics" default:"true"` - SidecarMetricsPort int32 `annotation:"dapr.io/metrics-port" default:"9090"` - EnableDebug bool `annotation:"dapr.io/enable-debug" default:"false"` - SidecarDebugPort int32 `annotation:"dapr.io/debug-port" default:"40000"` + EnableMetrics bool `annotation:"dapr.io/enable-metrics" default:"true"` + SidecarMetricsPort int32 `annotation:"dapr.io/metrics-port" default:"9090"` + EnableDebug bool `annotation:"dapr.io/enable-debug" default:"false"` + SidecarDebugPort int32 `annotation:"dapr.io/debug-port" default:"40000"` Env string `annotation:"dapr.io/env"` SidecarCPURequest string `annotation:"dapr.io/sidecar-cpu-request"` SidecarCPULimit string `annotation:"dapr.io/sidecar-cpu-limit"` SidecarMemoryRequest string `annotation:"dapr.io/sidecar-memory-request"` SidecarMemoryLimit string `annotation:"dapr.io/sidecar-memory-limit"` - SidecarListenAddresses string `annotation:"dapr.io/sidecar-listen-addresses" default:"[::1],127.0.0.1"` - SidecarLivenessProbeDelaySeconds int32 `annotation:"dapr.io/sidecar-liveness-probe-delay-seconds" default:"3"` - SidecarLivenessProbeTimeoutSeconds int32 `annotation:"dapr.io/sidecar-liveness-probe-timeout-seconds" default:"3"` - SidecarLivenessProbePeriodSeconds int32 `annotation:"dapr.io/sidecar-liveness-probe-period-seconds" default:"6"` - SidecarLivenessProbeThreshold int32 `annotation:"dapr.io/sidecar-liveness-probe-threshold" default:"3"` - SidecarReadinessProbeDelaySeconds int32 `annotation:"dapr.io/sidecar-readiness-probe-delay-seconds" default:"3"` + SidecarListenAddresses string `annotation:"dapr.io/sidecar-listen-addresses" default:"[::1],127.0.0.1"` + SidecarLivenessProbeDelaySeconds int32 `annotation:"dapr.io/sidecar-liveness-probe-delay-seconds" default:"3"` + SidecarLivenessProbeTimeoutSeconds int32 `annotation:"dapr.io/sidecar-liveness-probe-timeout-seconds" default:"3"` + SidecarLivenessProbePeriodSeconds int32 `annotation:"dapr.io/sidecar-liveness-probe-period-seconds" default:"6"` + SidecarLivenessProbeThreshold int32 `annotation:"dapr.io/sidecar-liveness-probe-threshold" default:"3"` + SidecarReadinessProbeDelaySeconds int32 `annotation:"dapr.io/sidecar-readiness-probe-delay-seconds" default:"3"` SidecarReadinessProbeTimeoutSeconds int32 `annotation:"dapr.io/sidecar-readiness-probe-timeout-seconds" default:"3"` - SidecarReadinessProbePeriodSeconds int32 `annotation:"dapr.io/sidecar-readiness-probe-period-seconds" default:"6"` - SidecarReadinessProbeThreshold int32 `annotation:"dapr.io/sidecar-readiness-probe-threshold" default:"3"` + SidecarReadinessProbePeriodSeconds int32 
`annotation:"dapr.io/sidecar-readiness-probe-period-seconds" default:"6"` + SidecarReadinessProbeThreshold int32 `annotation:"dapr.io/sidecar-readiness-probe-threshold" default:"3"` SidecarImage string `annotation:"dapr.io/sidecar-image"` SidecarSeccompProfileType string `annotation:"dapr.io/sidecar-seccomp-profile-type"` HTTPMaxRequestSize *int `annotation:"dapr.io/http-max-request-size"` HTTPReadBufferSize *int `annotation:"dapr.io/http-read-buffer-size"` - GracefulShutdownSeconds int `annotation:"dapr.io/graceful-shutdown-seconds" default:"-1"` + GracefulShutdownSeconds int `annotation:"dapr.io/graceful-shutdown-seconds" default:"-1"` EnableAPILogging *bool `annotation:"dapr.io/enable-api-logging"` UnixDomainSocketPath string `annotation:"dapr.io/unix-domain-socket-path"` VolumeMounts string `annotation:"dapr.io/volume-mounts"` VolumeMountsRW string `annotation:"dapr.io/volume-mounts-rw"` DisableBuiltinK8sSecretStore bool `annotation:"dapr.io/disable-builtin-k8s-secret-store"` EnableAppHealthCheck bool `annotation:"dapr.io/enable-app-health-check"` - AppHealthCheckPath string `annotation:"dapr.io/app-health-check-path" default:"/healthz"` - AppHealthProbeInterval int32 `annotation:"dapr.io/app-health-probe-interval" default:"5"` // In seconds - AppHealthProbeTimeout int32 `annotation:"dapr.io/app-health-probe-timeout" default:"500"` // In milliseconds - AppHealthThreshold int32 `annotation:"dapr.io/app-health-threshold" default:"3"` + AppHealthCheckPath string `annotation:"dapr.io/app-health-check-path" default:"/healthz"` + AppHealthProbeInterval int32 `annotation:"dapr.io/app-health-probe-interval" default:"5"` // In seconds + AppHealthProbeTimeout int32 `annotation:"dapr.io/app-health-probe-timeout" default:"500"` // In milliseconds + AppHealthThreshold int32 `annotation:"dapr.io/app-health-threshold" default:"3"` PlacementAddress string `annotation:"dapr.io/placement-host-address"` PluggableComponents string `annotation:"dapr.io/pluggable-components"` PluggableComponentsSocketsFolder string `annotation:"dapr.io/pluggable-components-sockets-folder"` diff --git a/pkg/injector/patcher/sidecar_container_test.go b/pkg/injector/patcher/sidecar_container_test.go index 8a27417805b..486e0e65068 100644 --- a/pkg/injector/patcher/sidecar_container_test.go +++ b/pkg/injector/patcher/sidecar_container_test.go @@ -87,7 +87,7 @@ func TestParseEnvString(t *testing.T) { c := NewSidecarConfig(&corev1.Pod{}) c.Env = tc.envStr envKeys, envVars := c.getEnv() - assert.Equal(t, tc.expLen, len(envVars)) + assert.Len(t, envVars, tc.expLen) assert.Equal(t, tc.expKeys, envKeys) assert.Equal(t, tc.expEnv, envVars) }) @@ -360,7 +360,7 @@ func TestGetSidecarContainer(t *testing.T) { } // Command should be empty, image's entrypoint to be used. - assert.Equal(t, 0, len(container.Command)) + assert.Empty(t, container.Command) // NAMESPACE assert.Equal(t, "dapr-system", container.Env[0].Value) // POD_NAME @@ -450,7 +450,7 @@ func TestGetSidecarContainer(t *testing.T) { } // Command should be empty, image's entrypoint to be used. 
- assert.Equal(t, 0, len(container.Command)) + assert.Empty(t, container.Command) // NAMESPACE assert.Equal(t, "dapr-system", container.Env[0].Value) // POD_NAME @@ -606,7 +606,7 @@ func TestGetSidecarContainer(t *testing.T) { name: "default does not use UDS", annotations: map[string]string{}, assertFn: func(t *testing.T, container *corev1.Container) { - assert.Equal(t, 0, len(container.VolumeMounts)) + assert.Empty(t, container.VolumeMounts) }, }, { @@ -832,11 +832,11 @@ func TestGetSidecarContainer(t *testing.T) { t.Run(tc.name, func(t *testing.T) { if tc.explicitCommandSpecified { - assert.True(t, len(container.Command) > 0, "Must contain a command") - assert.True(t, len(container.Args) > 0, "Must contain arguments") + assert.NotEmpty(t, container.Command, "Must contain a command") + assert.NotEmpty(t, container.Args, "Must contain arguments") } else { - assert.Len(t, container.Command, 0, "Must not contain a command") - assert.True(t, len(container.Args) > 0, "Must contain arguments") + assert.Empty(t, container.Command, "Must not contain a command") + assert.NotEmpty(t, container.Args, "Must contain arguments") } }) } diff --git a/pkg/injector/patcher/sidecar_patcher_test.go b/pkg/injector/patcher/sidecar_patcher_test.go index 935ab6dc063..197b6fc4451 100644 --- a/pkg/injector/patcher/sidecar_patcher_test.go +++ b/pkg/injector/patcher/sidecar_patcher_test.go @@ -148,7 +148,7 @@ func TestAddDaprEnvVarsToContainers(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { c := NewSidecarConfig(&corev1.Pod{}) patchEnv := c.addDaprEnvVarsToContainers(map[int]corev1.Container{0: tc.mockContainer}, tc.appProtocol) - assert.Equal(t, tc.expOpsLen, len(patchEnv)) + assert.Len(t, patchEnv, tc.expOpsLen) assert.Equal(t, tc.expOps, patchEnv) }) } @@ -306,7 +306,7 @@ func TestPatching(t *testing.T) { assert.Len(t, daprdContainer.VolumeMounts, 1) assert.Equal(t, "dapr-identity-token", daprdContainer.VolumeMounts[0].Name) assert.Equal(t, "/var/run/secrets/dapr.io/sentrytoken", daprdContainer.VolumeMounts[0].MountPath) - assert.Equal(t, true, daprdContainer.VolumeMounts[0].ReadOnly) + assert.True(t, daprdContainer.VolumeMounts[0].ReadOnly) assert.NotNil(t, daprdContainer.LivenessProbe) assert.Equal(t, "/v1.0/healthz", daprdContainer.LivenessProbe.HTTPGet.Path) diff --git a/pkg/injector/patcher/sidecar_volumes_test.go b/pkg/injector/patcher/sidecar_volumes_test.go index 33e697ffa0b..61071692ceb 100644 --- a/pkg/injector/patcher/sidecar_volumes_test.go +++ b/pkg/injector/patcher/sidecar_volumes_test.go @@ -94,7 +94,7 @@ func TestParseVolumeMountsString(t *testing.T) { tc := tc t.Run(tc.testName, func(t *testing.T) { mounts := parseVolumeMountsString(tc.mountStr, tc.readOnly) - assert.Equal(t, tc.expMountsLen, len(mounts)) + assert.Len(t, mounts, tc.expMountsLen) assert.Equal(t, tc.expMounts, mounts) }) } @@ -320,7 +320,7 @@ func TestAddVolumeToContainers(t *testing.T) { for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { patchEnv := addVolumeMountToContainers(map[int]corev1.Container{0: tc.mockContainer}, tc.socketMount) - assert.Equal(t, tc.expOpsLen, len(patchEnv)) + assert.Len(t, patchEnv, tc.expOpsLen) assert.Equal(t, tc.expOps, patchEnv) }) } diff --git a/pkg/injector/sentry/sentry.go b/pkg/injector/sentry/sentry.go index f4f3a09795c..7a58125f22a 100644 --- a/pkg/injector/sentry/sentry.go +++ b/pkg/injector/sentry/sentry.go @@ -108,7 +108,7 @@ func (r *Requester) RequestCertificateFromSentry(ctx context.Context, namespace return nil, nil, fmt.Errorf("failed to marshal private key: 
%w", err) } - return resp.WorkloadCertificate, pem.EncodeToMemory(&pem.Block{ + return resp.GetWorkloadCertificate(), pem.EncodeToMemory(&pem.Block{ Type: "PRIVATE KEY", Bytes: keyCS8, }), nil } diff --git a/pkg/injector/service/config.go b/pkg/injector/service/config.go index 31aaa602d63..12a7e5adec6 100644 --- a/pkg/injector/service/config.go +++ b/pkg/injector/service/config.go @@ -25,9 +25,9 @@ import ( // Config represents configuration options for the Dapr Sidecar Injector webhook server. type Config struct { - SidecarImage string `envconfig:"SIDECAR_IMAGE" required:"true"` + SidecarImage string `envconfig:"SIDECAR_IMAGE" required:"true"` SidecarImagePullPolicy string `envconfig:"SIDECAR_IMAGE_PULL_POLICY"` - Namespace string `envconfig:"NAMESPACE" required:"true"` + Namespace string `envconfig:"NAMESPACE" required:"true"` KubeClusterDomain string `envconfig:"KUBE_CLUSTER_DOMAIN"` AllowedServiceAccounts string `envconfig:"ALLOWED_SERVICE_ACCOUNTS"` AllowedServiceAccountsPrefixNames string `envconfig:"ALLOWED_SERVICE_ACCOUNTS_PREFIX_NAMES"` diff --git a/pkg/injector/service/config_test.go b/pkg/injector/service/config_test.go index 564e548c351..c0732c3d828 100644 --- a/pkg/injector/service/config_test.go +++ b/pkg/injector/service/config_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" ) @@ -32,7 +33,7 @@ func TestGetInjectorConfig(t *testing.T) { t.Setenv("ALLOWED_SERVICE_ACCOUNTS_PREFIX_NAMES", "namespace:test-service-account1,namespace2*:test-service-account2") cfg, err := GetConfig() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "daprd-test-image", cfg.SidecarImage) assert.Equal(t, "Always", cfg.SidecarImagePullPolicy) assert.Equal(t, "test-namespace", cfg.Namespace) @@ -50,7 +51,7 @@ func TestGetInjectorConfig(t *testing.T) { t.Setenv("KUBE_CLUSTER_DOMAIN", "") cfg, err := GetConfig() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "daprd-test-image", cfg.SidecarImage) assert.Equal(t, "IfNotPresent", cfg.SidecarImagePullPolicy) assert.Equal(t, "test-namespace", cfg.Namespace) @@ -68,7 +69,7 @@ func TestGetInjectorConfig(t *testing.T) { t.Setenv("SIDECAR_READ_ONLY_ROOT_FILESYSTEM", "") cfg, err := GetConfig() - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, cfg.GetRunAsNonRoot()) assert.True(t, cfg.GetReadOnlyRootFilesystem()) @@ -77,7 +78,7 @@ func TestGetInjectorConfig(t *testing.T) { t.Setenv("SIDECAR_READ_ONLY_ROOT_FILESYSTEM", "1") cfg, err = GetConfig() - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, cfg.GetRunAsNonRoot()) assert.True(t, cfg.GetReadOnlyRootFilesystem()) @@ -86,7 +87,7 @@ func TestGetInjectorConfig(t *testing.T) { t.Setenv("SIDECAR_READ_ONLY_ROOT_FILESYSTEM", "no") cfg, err = GetConfig() - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, cfg.GetRunAsNonRoot()) assert.False(t, cfg.GetReadOnlyRootFilesystem()) }) diff --git a/pkg/injector/service/handler_test.go b/pkg/injector/service/handler_test.go index 6f3ee8e4ab4..5dab398064c 100644 --- a/pkg/injector/service/handler_test.go +++ b/pkg/injector/service/handler_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" admissionv1 "k8s.io/api/admission/v1" authenticationv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" @@ -50,7 +51,7 @@ func TestHandleRequest(t *testing.T) { KubeClient: kubernetesfake.NewSimpleClientset(), }) - assert.NoError(t, 
err) + require.NoError(t, err) injector := i.(*injector) injector.currentTrustAnchors = func() ([]byte, error) { return nil, nil @@ -281,21 +282,21 @@ func TestHandleRequest(t *testing.T) { tc := tc t.Run(tc.testName, func(t *testing.T) { requestBytes, err := json.Marshal(tc.request) - assert.NoError(t, err) + require.NoError(t, err) resp, err := http.Post(ts.URL, tc.contentType, bytes.NewBuffer(requestBytes)) - assert.NoError(t, err) + require.NoError(t, err) defer resp.Body.Close() assert.Equal(t, tc.expectStatusCode, resp.StatusCode) if resp.StatusCode == http.StatusOK { body, err := io.ReadAll(resp.Body) - assert.NoError(t, err) + require.NoError(t, err) var ar admissionv1.AdmissionReview err = json.Unmarshal(body, &ar) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, tc.expectPatched, len(ar.Response.Patch) > 0) } diff --git a/pkg/injector/service/injector_test.go b/pkg/injector/service/injector_test.go index 1233bb3f6e8..1b8779f5beb 100644 --- a/pkg/injector/service/injector_test.go +++ b/pkg/injector/service/injector_test.go @@ -16,11 +16,11 @@ package service import ( "context" "encoding/json" - "errors" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,14 +40,14 @@ func TestConfigCorrectValues(t *testing.T) { ControlPlaneTrustDomain: "trust.domain", }, }) - assert.NoError(t, err) + require.NoError(t, err) injector := i.(*injector) assert.Equal(t, "c", injector.config.SidecarImage) assert.Equal(t, "d", injector.config.SidecarImagePullPolicy) assert.Equal(t, "e", injector.config.Namespace) m, err := namespacednamematcher.CreateFromString("ns*:sa,namespace:sa*") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, m, injector.namespaceNameMatcher) } @@ -60,7 +60,7 @@ func TestNewInjectorBadAllowedPrefixedServiceAccountConfig(t *testing.T) { AllowedServiceAccountsPrefixNames: "ns*:sa,namespace:sa*sa", }, }) - assert.Error(t, err) + require.Error(t, err) } func TestGetAppIDFromRequest(t *testing.T) { @@ -130,31 +130,31 @@ func TestAllowedControllersServiceAccountUID(t *testing.T) { }, } _, err := client.CoreV1().ServiceAccounts(testCase.namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) - assert.NoError(t, err) + require.NoError(t, err) } t.Run("injector config has no allowed service account", func(t *testing.T) { uids, err := AllowedControllersServiceAccountUID(context.TODO(), Config{}, client) - assert.NoError(t, err) - assert.Equal(t, 2, len(uids)) + require.NoError(t, err) + assert.Len(t, uids, 2) }) t.Run("injector config has a valid allowed service account", func(t *testing.T) { uids, err := AllowedControllersServiceAccountUID(context.TODO(), Config{AllowedServiceAccounts: "test:test"}, client) - assert.NoError(t, err) - assert.Equal(t, 3, len(uids)) + require.NoError(t, err) + assert.Len(t, uids, 3) }) t.Run("injector config has a invalid allowed service account", func(t *testing.T) { uids, err := AllowedControllersServiceAccountUID(context.TODO(), Config{AllowedServiceAccounts: "abc:abc"}, client) - assert.NoError(t, err) - assert.Equal(t, 2, len(uids)) + require.NoError(t, err) + assert.Len(t, uids, 2) }) t.Run("injector config has multiple allowed service accounts", func(t *testing.T) { uids, err := AllowedControllersServiceAccountUID(context.TODO(), Config{AllowedServiceAccounts: "test:test,abc:abc"}, client) - assert.NoError(t, err) - assert.Equal(t, 3, len(uids)) + 
require.NoError(t, err) + assert.Len(t, uids, 3) }) } @@ -165,13 +165,13 @@ func TestReady(t *testing.T) { t.Run("if injector ready return nil", func(t *testing.T) { i := &injector{ready: make(chan struct{})} close(i.ready) - assert.NoError(t, i.Ready(ctx)) + require.NoError(t, i.Ready(ctx)) }) t.Run("if not ready then should return timeout error if context cancelled", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*10) defer cancel() i := &injector{ready: make(chan struct{})} - assert.Error(t, i.Ready(ctx), errors.New("timed out waiting for injector to become ready")) + require.EqualError(t, i.Ready(ctx), "timed out waiting for injector to become ready") }) } diff --git a/pkg/messaging/direct_messaging.go b/pkg/messaging/direct_messaging.go index 725c5668626..b1d7bb5dcba 100644 --- a/pkg/messaging/direct_messaging.go +++ b/pkg/messaging/direct_messaging.go @@ -250,7 +250,7 @@ func (d *directMessaging) invokeHTTPEndpoint(ctx context.Context, appID, appName // Diagnostics if imr != nil { - diag.DefaultMonitoring.ServiceInvocationResponseReceived(appID, imr.Status().Code, start) + diag.DefaultMonitoring.ServiceInvocationResponseReceived(appID, imr.Status().GetCode(), start) } return imr, nopTeardown, err @@ -287,7 +287,7 @@ func (d *directMessaging) invokeRemote(ctx context.Context, appID, appNamespace, // Diagnostics if imr != nil { - diag.DefaultMonitoring.ServiceInvocationResponseReceived(appID, imr.Status().Code, start) + diag.DefaultMonitoring.ServiceInvocationResponseReceived(appID, imr.Status().GetCode(), start) } return imr, teardown, err @@ -388,7 +388,7 @@ func (d *directMessaging) invokeRemoteStream(ctx context.Context, clientV1 inter } // Send the chunk if there's anything to send - if proto.Request != nil || proto.Payload != nil { + if proto.GetRequest() != nil || proto.GetPayload() != nil { err = stream.SendMsg(proto) if errors.Is(err, io.EOF) { // If SendMsg returns an io.EOF error, it usually means that there's a transport-level error @@ -436,17 +436,17 @@ func (d *directMessaging) invokeRemoteStream(ctx context.Context, clientV1 inter } return nil, err } - if chunk.Response == nil || chunk.Response.Status == nil { + if chunk.GetResponse() == nil || chunk.GetResponse().GetStatus() == nil { return nil, errors.New("response does not contain the required fields in the leading chunk") } pr, pw := io.Pipe() - res, err := invokev1.InternalInvokeResponse(chunk.Response) + res, err := invokev1.InternalInvokeResponse(chunk.GetResponse()) if err != nil { return nil, err } - if chunk.Response.Message != nil { - res.WithContentType(chunk.Response.Message.ContentType) - res.WithDataTypeURL(chunk.Response.Message.GetData().GetTypeUrl()) // Could be empty + if chunk.GetResponse().GetMessage() != nil { + res.WithContentType(chunk.GetResponse().GetMessage().GetContentType()) + res.WithDataTypeURL(chunk.GetResponse().GetMessage().GetData().GetTypeUrl()) // Could be empty } res.WithRawData(pr) @@ -486,7 +486,7 @@ func (d *directMessaging) invokeRemoteStream(ctx context.Context, clientV1 inter return } - if chunk.Response != nil && (chunk.Response.Status != nil || chunk.Response.Headers != nil || chunk.Response.Message != nil) { + if chunk.GetResponse() != nil && (chunk.GetResponse().GetStatus() != nil || chunk.GetResponse().GetHeaders() != nil || chunk.GetResponse().GetMessage() != nil) { pw.CloseWithError(errors.New("response metadata found in non-leading chunk")) return } @@ -524,7 +524,7 @@ func (d *directMessaging) 
addForwardedHeadersToMetadata(req *invokev1.InvokeMeth Values: []string{value}, } } else { - metadata[header].Values = append(metadata[header].Values, value) + metadata[header].Values = append(metadata[header].GetValues(), value) } } @@ -582,16 +582,16 @@ func (d *directMessaging) getRemoteApp(appID string) (remoteApp, error) { // ReadChunk reads a chunk of data from a StreamPayload object. // The returned value "seq" indicates the sequence number func ReadChunk(payload *commonv1pb.StreamPayload, out io.Writer) (seq uint64, err error) { - if len(payload.Data) > 0 { + if len(payload.GetData()) > 0 { var n int - n, err = out.Write(payload.Data) + n, err = out.Write(payload.GetData()) if err != nil { return 0, err } - if n != len(payload.Data) { - return 0, fmt.Errorf("wrote %d out of %d bytes", n, len(payload.Data)) + if n != len(payload.GetData()) { + return 0, fmt.Errorf("wrote %d out of %d bytes", n, len(payload.GetData())) } } - return payload.Seq, nil + return payload.GetSeq(), nil } diff --git a/pkg/messaging/direct_messaging_test.go b/pkg/messaging/direct_messaging_test.go index 757fe80e8cd..d0f08adf91f 100644 --- a/pkg/messaging/direct_messaging_test.go +++ b/pkg/messaging/direct_messaging_test.go @@ -49,7 +49,7 @@ func TestDestinationHeaders(t *testing.T) { dm := &directMessaging{} dm.addDestinationAppIDHeaderToMetadata(appID, req) md := req.Metadata()[invokev1.DestinationIDHeader] - assert.Equal(t, appID, md.Values[0]) + assert.Equal(t, appID, md.GetValues()[0]) }) } @@ -65,8 +65,8 @@ func TestCallerAndCalleeHeaders(t *testing.T) { dm.addCallerAndCalleeAppIDHeaderToMetadata(callerAppID, calleeAppID, req) actualCallerAppID := req.Metadata()[invokev1.CallerIDHeader] actualCalleeAppID := req.Metadata()[invokev1.CalleeIDHeader] - assert.Equal(t, callerAppID, actualCallerAppID.Values[0]) - assert.Equal(t, calleeAppID, actualCalleeAppID.Values[0]) + assert.Equal(t, callerAppID, actualCallerAppID.GetValues()[0]) + assert.Equal(t, calleeAppID, actualCalleeAppID.GetValues()[0]) }) } @@ -83,13 +83,13 @@ func TestForwardedHeaders(t *testing.T) { dm.addForwardedHeadersToMetadata(req) md := req.Metadata()[fasthttp.HeaderXForwardedFor] - assert.Equal(t, "1", md.Values[0]) + assert.Equal(t, "1", md.GetValues()[0]) md = req.Metadata()[fasthttp.HeaderXForwardedHost] - assert.Equal(t, "2", md.Values[0]) + assert.Equal(t, "2", md.GetValues()[0]) md = req.Metadata()[fasthttp.HeaderForwarded] - assert.Equal(t, "for=1;by=1;host=2", md.Values[0]) + assert.Equal(t, "for=1;by=1;host=2", md.GetValues()[0]) }) t.Run("forwarded headers get appended", func(t *testing.T) { @@ -108,16 +108,16 @@ func TestForwardedHeaders(t *testing.T) { dm.addForwardedHeadersToMetadata(req) md := req.Metadata()[fasthttp.HeaderXForwardedFor] - assert.Equal(t, "originalXForwardedFor", md.Values[0]) - assert.Equal(t, "1", md.Values[1]) + assert.Equal(t, "originalXForwardedFor", md.GetValues()[0]) + assert.Equal(t, "1", md.GetValues()[1]) md = req.Metadata()[fasthttp.HeaderXForwardedHost] - assert.Equal(t, "originalXForwardedHost", md.Values[0]) - assert.Equal(t, "2", md.Values[1]) + assert.Equal(t, "originalXForwardedHost", md.GetValues()[0]) + assert.Equal(t, "2", md.GetValues()[1]) md = req.Metadata()[fasthttp.HeaderForwarded] - assert.Equal(t, "originalForwarded", md.Values[0]) - assert.Equal(t, "for=1;by=1;host=2", md.Values[1]) + assert.Equal(t, "originalForwarded", md.GetValues()[0]) + assert.Equal(t, "for=1;by=1;host=2", md.GetValues()[1]) }) } @@ -128,7 +128,7 @@ func TestKubernetesNamespace(t *testing.T) { dm := 
&directMessaging{} id, ns, err := dm.requestAppIDAndNamespace(appID) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, ns) assert.Equal(t, appID, id) }) @@ -139,7 +139,7 @@ func TestKubernetesNamespace(t *testing.T) { dm := &directMessaging{} id, ns, err := dm.requestAppIDAndNamespace(appID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "ns1", ns) assert.Equal(t, "app1", id) }) @@ -150,7 +150,7 @@ func TestKubernetesNamespace(t *testing.T) { dm := &directMessaging{} _, _, err := dm.requestAppIDAndNamespace(appID) - assert.Error(t, err) + require.Error(t, err) }) } @@ -202,7 +202,7 @@ func TestInvokeRemote(t *testing.T) { if err != nil { return } - assert.True(t, pd.Message.Data == nil || len(pd.Message.Data.Value) == 0) + assert.True(t, pd.GetMessage().GetData() == nil || len(pd.GetMessage().GetData().GetValue()) == 0) }) t.Run("streaming with single chunk", func(t *testing.T) { @@ -220,7 +220,7 @@ func TestInvokeRemote(t *testing.T) { pd, err := res.ProtoWithData() require.NoError(t, err) - assert.Equal(t, "🐱", string(pd.Message.Data.Value)) + assert.Equal(t, "🐱", string(pd.GetMessage().GetData().GetValue())) }) t.Run("streaming with multiple chunks", func(t *testing.T) { @@ -245,7 +245,7 @@ func TestInvokeRemote(t *testing.T) { pd, err := res.ProtoWithData() require.NoError(t, err) - assert.Equal(t, "Sempre caro mi fu quest'ermo colle e questa siepe, che da tanta parte dell'ultimo orizzonte il guardo esclude. … E il naufragar m'è dolce in questo mare.", string(pd.Message.Data.Value)) + assert.Equal(t, "Sempre caro mi fu quest'ermo colle e questa siepe, che da tanta parte dell'ultimo orizzonte il guardo esclude. … E il naufragar m'è dolce in questo mare.", string(pd.GetMessage().GetData().GetValue())) }) t.Run("target does not support streaming - request is not replayable", func(t *testing.T) { @@ -279,7 +279,7 @@ func TestInvokeRemote(t *testing.T) { pd, err := res.ProtoWithData() require.NoError(t, err) - assert.Equal(t, "🐶", string(pd.Message.Data.Value)) + assert.Equal(t, "🐶", string(pd.GetMessage().GetData().GetValue())) }) t.Run("target does not support streaming - request has data in-memory", func(t *testing.T) { @@ -303,7 +303,7 @@ func TestInvokeRemote(t *testing.T) { pd, err := res.ProtoWithData() require.NoError(t, err) - assert.Equal(t, "🐶", string(pd.Message.Data.Value)) + assert.Equal(t, "🐶", string(pd.GetMessage().GetData().GetValue())) }) } @@ -476,7 +476,7 @@ func TestInvokeRemoteUnaryForHTTPEndpoint(t *testing.T) { } _, err := d.invokeRemoteUnaryForHTTPEndpoint(context.Background(), nil, "abc") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("channel not found", func(t *testing.T) { @@ -485,7 +485,7 @@ func TestInvokeRemoteUnaryForHTTPEndpoint(t *testing.T) { } _, err := d.invokeRemoteUnaryForHTTPEndpoint(context.Background(), nil, "abc") - assert.Error(t, err) + require.Error(t, err) }) } diff --git a/pkg/messaging/grpc_proxy_test.go b/pkg/messaging/grpc_proxy_test.go index e4783289c84..c76f3422cfd 100644 --- a/pkg/messaging/grpc_proxy_test.go +++ b/pkg/messaging/grpc_proxy_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" @@ -62,7 +63,7 @@ func TestNewProxy(t *testing.T) { assert.Equal(t, "a", proxy.appID) assert.NotNil(t, proxy.connectionFactory) - assert.True(t, reflect.ValueOf(connectionFn).Pointer() == 
reflect.ValueOf(proxy.connectionFactory).Pointer()) + assert.Equal(t, reflect.ValueOf(connectionFn).Pointer(), reflect.ValueOf(proxy.connectionFactory).Pointer()) } func TestSetRemoteAppFn(t *testing.T) { @@ -82,7 +83,7 @@ func TestSetRemoteAppFn(t *testing.T) { proxy := p.(*proxy) app, err := proxy.remoteAppFn("a") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "a", app.id) } @@ -141,7 +142,7 @@ func TestIntercept(t *testing.T) { _, conn, _, teardown, err := proxy.intercept(ctx, "/test") defer teardown(true) - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, conn) }) @@ -166,7 +167,7 @@ func TestIntercept(t *testing.T) { proxy := p.(*proxy) _, _, _, _, err := proxy.intercept(ctx, "/test") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("proxy to the app", func(t *testing.T) { @@ -191,7 +192,7 @@ func TestIntercept(t *testing.T) { _, conn, _, teardown, err := proxy.intercept(ctx, "/test") defer teardown(true) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, conn) assert.Equal(t, "a", conn.Target()) }) @@ -219,7 +220,7 @@ func TestIntercept(t *testing.T) { ctx, conn, _, teardown, err := proxy.intercept(ctx, "/test") defer teardown(true) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, conn) assert.Equal(t, "b", conn.Target()) @@ -259,7 +260,7 @@ func TestIntercept(t *testing.T) { _, conn, _, teardown, err := proxy.intercept(ctx, "/test") defer teardown(true) - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, conn) }) @@ -279,7 +280,7 @@ func TestIntercept(t *testing.T) { _, conn, _, teardown, err := proxy.intercept(ctx, "/test") defer teardown(true) - assert.Error(t, err) + require.Error(t, err) assert.Nil(t, conn) }) } diff --git a/pkg/messaging/v1/invoke_method_request.go b/pkg/messaging/v1/invoke_method_request.go index 489c2189d24..e7ca6c4ec42 100644 --- a/pkg/messaging/v1/invoke_method_request.go +++ b/pkg/messaging/v1/invoke_method_request.go @@ -69,7 +69,7 @@ func FromInvokeRequestMessage(pb *commonv1pb.InvokeRequest) *InvokeMethodRequest // InternalInvokeRequest creates InvokeMethodRequest object from InternalInvokeRequest pb object. func InternalInvokeRequest(pb *internalv1pb.InternalInvokeRequest) (*InvokeMethodRequest, error) { req := &InvokeMethodRequest{r: pb} - if pb.Message == nil { + if pb.GetMessage() == nil { return nil, errors.New("field Message is nil") } @@ -159,7 +159,7 @@ func (imr *InvokeMethodRequest) WithHTTPExtension(verb string, querystring strin // WithCustomHTTPMetadata applies a metadata map to a InvokeMethodRequest. func (imr *InvokeMethodRequest) WithCustomHTTPMetadata(md map[string]string) *InvokeMethodRequest { for k, v := range md { - if imr.r.Metadata == nil { + if imr.r.GetMetadata() == nil { imr.r.Metadata = make(map[string]*internalv1pb.ListStringValue) } @@ -190,12 +190,12 @@ func (imr *InvokeMethodRequest) CanReplay() bool { // EncodeHTTPQueryString generates querystring for http using http extension object. func (imr *InvokeMethodRequest) EncodeHTTPQueryString() string { - m := imr.r.Message + m := imr.r.GetMessage() if m.GetHttpExtension() == nil { return "" } - return m.HttpExtension.Querystring + return m.GetHttpExtension().GetQuerystring() } // APIVersion gets API version of InvokeMethodRequest. @@ -205,7 +205,7 @@ func (imr *InvokeMethodRequest) APIVersion() internalv1pb.APIVersion { // Metadata gets Metadata of InvokeMethodRequest. 
func (imr *InvokeMethodRequest) Metadata() DaprInternalMetadata { - return imr.r.Metadata + return imr.r.GetMetadata() } // Proto returns InternalInvokeRequest Proto object. @@ -215,7 +215,7 @@ func (imr *InvokeMethodRequest) Proto() *internalv1pb.InternalInvokeRequest { // ProtoWithData returns a copy of the internal InternalInvokeRequest Proto object with the entire data stream read into the Data property. func (imr *InvokeMethodRequest) ProtoWithData() (*internalv1pb.InternalInvokeRequest, error) { - if imr.r == nil || imr.r.Message == nil { + if imr.r == nil || imr.r.GetMessage() == nil { return nil, errors.New("message is nil") } @@ -244,17 +244,17 @@ func (imr *InvokeMethodRequest) ProtoWithData() (*internalv1pb.InternalInvokeReq // Actor returns actor type and id. func (imr *InvokeMethodRequest) Actor() *internalv1pb.Actor { - return imr.r.Actor + return imr.r.GetActor() } // Message gets InvokeRequest Message object. func (imr *InvokeMethodRequest) Message() *commonv1pb.InvokeRequest { - return imr.r.Message + return imr.r.GetMessage() } // HasMessageData returns true if the message object contains a slice of data buffered. func (imr *InvokeMethodRequest) HasMessageData() bool { - m := imr.r.Message + m := imr.r.GetMessage() return len(m.GetData().GetValue()) > 0 } @@ -264,12 +264,12 @@ func (imr *InvokeMethodRequest) ResetMessageData() { return } - imr.r.Message.Data.Reset() + imr.r.GetMessage().GetData().Reset() } // ContentType returns the content type of the message. func (imr *InvokeMethodRequest) ContentType() string { - m := imr.r.Message + m := imr.r.GetMessage() if m == nil { return "" } @@ -285,14 +285,14 @@ func (imr *InvokeMethodRequest) ContentType() string { // RawData returns the stream body. // Note: this method is not safe for concurrent use. func (imr *InvokeMethodRequest) RawData() (r io.Reader) { - m := imr.r.Message + m := imr.r.GetMessage() if m == nil { return nil } // If the message has a data property, use that if imr.HasMessageData() { - return bytes.NewReader(m.Data.Value) + return bytes.NewReader(m.GetData().GetValue()) } return imr.replayableRequest.RawData() @@ -302,7 +302,7 @@ func (imr *InvokeMethodRequest) RawData() (r io.Reader) { func (imr *InvokeMethodRequest) RawDataFull() ([]byte, error) { // If the message has a data property, use that if imr.HasMessageData() { - return imr.r.Message.Data.Value, nil + return imr.r.GetMessage().GetData().GetValue(), nil } r := imr.RawData() @@ -319,14 +319,14 @@ func (imr *InvokeMethodRequest) GetDataObject() any { // AddMetadata adds new metadata options to the existing set.
func (imr *InvokeMethodRequest) AddMetadata(md map[string][]string) { - if imr.r.Metadata == nil { + if imr.r.GetMetadata() == nil { imr.WithMetadata(md) return } for key, val := range metadataToInternalMetadata(md) { // We're only adding new values, not overwriting existing - if _, ok := imr.r.Metadata[key]; !ok { + if _, ok := imr.r.GetMetadata()[key]; !ok { imr.r.Metadata[key] = val } } diff --git a/pkg/messaging/v1/invoke_method_request_test.go b/pkg/messaging/v1/invoke_method_request_test.go index c8eaaf8d126..ba4395c1f23 100644 --- a/pkg/messaging/v1/invoke_method_request_test.go +++ b/pkg/messaging/v1/invoke_method_request_test.go @@ -37,7 +37,7 @@ func TestInvokeRequest(t *testing.T) { defer req.Close() assert.Equal(t, internalv1pb.APIVersion_V1, req.r.GetVer()) - assert.Equal(t, "test_method", req.r.Message.GetMethod()) + assert.Equal(t, "test_method", req.r.GetMessage().GetMethod()) } func TestFromInvokeRequestMessage(t *testing.T) { @@ -47,11 +47,11 @@ func TestFromInvokeRequestMessage(t *testing.T) { defer req.Close() assert.Equal(t, internalv1pb.APIVersion_V1, req.r.GetVer()) - assert.Equal(t, "frominvokerequestmessage", req.r.Message.GetMethod()) + assert.Equal(t, "frominvokerequestmessage", req.r.GetMessage().GetMethod()) bData, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) - assert.Len(t, bData, 0) + require.NoError(t, err) + assert.Empty(t, bData) }) t.Run("with data", func(t *testing.T) { @@ -63,10 +63,10 @@ func TestFromInvokeRequestMessage(t *testing.T) { defer req.Close() assert.Equal(t, internalv1pb.APIVersion_V1, req.r.GetVer()) - assert.Equal(t, "frominvokerequestmessage", req.r.Message.GetMethod()) + assert.Equal(t, "frominvokerequestmessage", req.r.GetMessage().GetMethod()) bData, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", string(bData)) }) } @@ -84,15 +84,15 @@ func TestInternalInvokeRequest(t *testing.T) { } ir, err := InternalInvokeRequest(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() - assert.NotNil(t, ir.r.Message) - assert.Equal(t, "invoketest", ir.r.Message.Method) - assert.Nil(t, ir.r.Message.Data) + assert.NotNil(t, ir.r.GetMessage()) + assert.Equal(t, "invoketest", ir.r.GetMessage().GetMethod()) + assert.Nil(t, ir.r.GetMessage().GetData()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) - assert.Len(t, bData, 0) + require.NoError(t, err) + assert.Empty(t, bData) }) t.Run("valid internal invoke request with data", func(t *testing.T) { @@ -107,16 +107,16 @@ func TestInternalInvokeRequest(t *testing.T) { } ir, err := InternalInvokeRequest(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() - assert.NotNil(t, ir.r.Message) - assert.Equal(t, "invoketest", ir.r.Message.Method) - require.NotNil(t, ir.r.Message.Data) - require.NotNil(t, ir.r.Message.Data.Value) - assert.Equal(t, []byte("test"), ir.r.Message.Data.Value) + assert.NotNil(t, ir.r.GetMessage()) + assert.Equal(t, "invoketest", ir.r.GetMessage().GetMethod()) + require.NotNil(t, ir.r.GetMessage().GetData()) + require.NotNil(t, ir.r.GetMessage().GetData().GetValue()) + assert.Equal(t, []byte("test"), ir.r.GetMessage().GetData().GetValue()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", string(bData)) }) @@ -127,7 +127,7 @@ func TestInternalInvokeRequest(t *testing.T) { } _, err := InternalInvokeRequest(&pb) - assert.Error(t, err) + require.Error(t, err) }) } @@ -173,7 +173,7 @@ func TestData(t 
*testing.T) { defer req.Close() contentType := req.ContentType() bData, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "application/json", contentType) assert.Equal(t, "test", string(bData)) }) @@ -185,8 +185,8 @@ func TestData(t *testing.T) { contentType := req.ContentType() bData, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) - assert.Equal(t, "", req.r.Message.ContentType) + require.NoError(t, err) + assert.Equal(t, "", req.r.GetMessage().GetContentType()) assert.Equal(t, "", contentType) assert.Equal(t, "test", string(bData)) }) @@ -196,7 +196,7 @@ func TestData(t *testing.T) { defer req.Close() req.r.Message.Data = &anypb.Any{TypeUrl: "type", Value: []byte("fake")} bData, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, ProtobufContentType, req.ContentType()) assert.Equal(t, "fake", string(bData)) }) @@ -219,11 +219,11 @@ func TestRawData(t *testing.T) { r := req.RawData() bData, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "nel blu dipinto di blu", string(bData)) - _ = assert.Nil(t, req.Message().Data) || - assert.Len(t, req.Message().Data.Value, 0) + _ = assert.Nil(t, req.Message().GetData()) || + assert.Empty(t, req.Message().GetData().GetValue()) }) t.Run("data inside message has priority", func(t *testing.T) { @@ -238,11 +238,11 @@ func TestRawData(t *testing.T) { r := req.RawData() bData, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, msg, string(bData)) - _ = assert.NotNil(t, req.Message().Data) && - assert.Equal(t, msg, string(req.Message().Data.Value)) + _ = assert.NotNil(t, req.Message().GetData()) && + assert.Equal(t, msg, string(req.Message().GetData().GetValue())) }) } @@ -252,7 +252,7 @@ func TestRawDataFull(t *testing.T) { r: &internalv1pb.InternalInvokeRequest{}, } data, err := req.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, data) }) @@ -262,11 +262,11 @@ func TestRawDataFull(t *testing.T) { defer req.Close() data, err := req.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "nel blu dipinto di blu", string(data)) - _ = assert.Nil(t, req.Message().Data) || - assert.Len(t, req.Message().Data.Value, 0) + _ = assert.Nil(t, req.Message().GetData()) || + assert.Empty(t, req.Message().GetData().GetValue()) }) t.Run("data inside message has priority", func(t *testing.T) { @@ -279,11 +279,11 @@ func TestRawDataFull(t *testing.T) { req.Message().Data = &anypb.Any{Value: []byte(msg)} data, err := req.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, msg, string(data)) - _ = assert.NotNil(t, req.Message().Data) && - assert.Equal(t, msg, string(req.Message().Data.Value)) + _ = assert.NotNil(t, req.Message().GetData()) && + assert.Equal(t, msg, string(req.Message().GetData().GetValue())) }) } @@ -291,7 +291,7 @@ func TestHTTPExtension(t *testing.T) { req := NewInvokeMethodRequest("test_method"). WithHTTPExtension("POST", "query1=value1&query2=value2") defer req.Close() - assert.Equal(t, commonv1pb.HTTPExtension_POST, req.Message().HttpExtension.Verb) + assert.Equal(t, commonv1pb.HTTPExtension_POST, req.Message().GetHttpExtension().GetVerb()) assert.Equal(t, "query1=value1&query2=value2", req.EncodeHTTPQueryString()) } @@ -299,7 +299,7 @@ func TestActor(t *testing.T) { req := NewInvokeMethodRequest("test_method"). 
WithActor("testActor", "1") defer req.Close() - assert.Equal(t, "testActor", req.Actor().ActorType) + assert.Equal(t, "testActor", req.Actor().GetActorType()) assert.Equal(t, "1", req.Actor().GetActorId()) } @@ -316,18 +316,18 @@ func TestRequestProto(t *testing.T) { } ir, err := InternalInvokeRequest(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() req2 := ir.Proto() msg := req2.GetMessage() - assert.Equal(t, "application/json", msg.ContentType) - require.NotNil(t, msg.Data) - require.NotNil(t, msg.Data.Value) - assert.Equal(t, []byte("test"), msg.Data.Value) + assert.Equal(t, "application/json", msg.GetContentType()) + require.NotNil(t, msg.GetData()) + require.NotNil(t, msg.GetData().GetValue()) + assert.Equal(t, []byte("test"), msg.GetData().GetValue()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte("test"), bData) }) @@ -342,16 +342,16 @@ func TestRequestProto(t *testing.T) { } ir, err := InternalInvokeRequest(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() ir.data = newReaderCloser(strings.NewReader("test")) req2 := ir.Proto() - assert.Equal(t, "application/json", req2.GetMessage().ContentType) - assert.Nil(t, req2.GetMessage().Data) + assert.Equal(t, "application/json", req2.GetMessage().GetContentType()) + assert.Nil(t, req2.GetMessage().GetData()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte("test"), bData) }) } @@ -369,13 +369,13 @@ func TestRequestProtoWithData(t *testing.T) { } ir, err := InternalInvokeRequest(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() req2, err := ir.ProtoWithData() - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, "application/json", req2.GetMessage().ContentType) - assert.Equal(t, []byte("test"), req2.GetMessage().Data.Value) + assert.Equal(t, "application/json", req2.GetMessage().GetContentType()) + assert.Equal(t, []byte("test"), req2.GetMessage().GetData().GetValue()) }) t.Run("stream", func(t *testing.T) { @@ -389,14 +389,14 @@ func TestRequestProtoWithData(t *testing.T) { } ir, err := InternalInvokeRequest(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() ir.data = newReaderCloser(strings.NewReader("test")) req2, err := ir.ProtoWithData() - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, "application/json", req2.GetMessage().ContentType) - assert.Equal(t, []byte("test"), req2.GetMessage().Data.Value) + assert.Equal(t, "application/json", req2.GetMessage().GetContentType()) + assert.Equal(t, []byte("test"), req2.GetMessage().GetData().GetValue()) }) } @@ -408,10 +408,10 @@ func TestAddHeaders(t *testing.T) { header.Add("Dapr-Reentrant-Id", "test") req.AddMetadata(header) - require.NotNil(t, req.r.Metadata) - require.NotNil(t, req.r.Metadata["Dapr-Reentrant-Id"]) - require.NotEmpty(t, req.r.Metadata["Dapr-Reentrant-Id"].Values) - assert.Equal(t, "test", req.r.Metadata["Dapr-Reentrant-Id"].Values[0]) + require.NotNil(t, req.r.GetMetadata()) + require.NotNil(t, req.r.GetMetadata()["Dapr-Reentrant-Id"]) + require.NotEmpty(t, req.r.GetMetadata()["Dapr-Reentrant-Id"].GetValues()) + assert.Equal(t, "test", req.r.GetMetadata()["Dapr-Reentrant-Id"].GetValues()[0]) }) t.Run("multiple values", func(t *testing.T) { @@ -422,10 +422,10 @@ func TestAddHeaders(t *testing.T) { header.Add("Dapr-Reentrant-Id", "test2") req.AddMetadata(header) - require.NotNil(t, req.r.Metadata) - require.NotNil(t, 
req.r.Metadata["Dapr-Reentrant-Id"]) - require.NotEmpty(t, req.r.Metadata["Dapr-Reentrant-Id"].Values) - assert.Equal(t, []string{"test", "test2"}, req.r.Metadata["Dapr-Reentrant-Id"].Values) + require.NotNil(t, req.r.GetMetadata()) + require.NotNil(t, req.r.GetMetadata()["Dapr-Reentrant-Id"]) + require.NotEmpty(t, req.r.GetMetadata()["Dapr-Reentrant-Id"].GetValues()) + assert.Equal(t, []string{"test", "test2"}, req.r.GetMetadata()["Dapr-Reentrant-Id"].GetValues()) }) t.Run("does not overwrite", func(t *testing.T) { @@ -437,9 +437,9 @@ func TestAddHeaders(t *testing.T) { header.Set("Dapr-Reentrant-Id", "test2") req.AddMetadata(header) - require.NotNil(t, req.r.Metadata["Dapr-Reentrant-Id"]) - require.NotEmpty(t, req.r.Metadata["Dapr-Reentrant-Id"].Values) - assert.Equal(t, "test", req.r.Metadata["Dapr-Reentrant-Id"].Values[0]) + require.NotNil(t, req.r.GetMetadata()["Dapr-Reentrant-Id"]) + require.NotEmpty(t, req.r.GetMetadata()["Dapr-Reentrant-Id"].GetValues()) + assert.Equal(t, "test", req.r.GetMetadata()["Dapr-Reentrant-Id"].GetValues()[0]) }) } @@ -466,7 +466,7 @@ func TestWithCustomHTTPMetadata(t *testing.T) { val, ok := imrMd[customMetadataKey(i)] assert.True(t, ok) // We assume only 1 value per key as the input map can only support string -> string mapping. - assert.Equal(t, customMetadataValue(i), val.Values[0]) + assert.Equal(t, customMetadataValue(i), val.GetValues()[0]) } } @@ -511,7 +511,7 @@ func TestRequestReplayable(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) @@ -523,15 +523,15 @@ func TestRequestReplayable(t *testing.T) { }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), req.replay.Len()) + assert.Len(t, message, req.replay.Len()) read, err := io.ReadAll(bytes.NewReader(req.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close request", func(t *testing.T) { err := req.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, req.data) assert.Nil(t, req.replay) }) @@ -543,7 +543,7 @@ func TestRequestReplayable(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) @@ -555,27 +555,27 @@ func TestRequestReplayable(t *testing.T) { }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), req.replay.Len()) + assert.Len(t, message, req.replay.Len()) read, err := io.ReadAll(bytes.NewReader(req.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("second read in full", func(t *testing.T) { read, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("third read in full", func(t *testing.T) { read, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close request", func(t *testing.T) { err := req.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, req.data) assert.Nil(t, req.replay) }) @@ -587,7 +587,7 @@ func TestRequestReplayable(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, 
string(read)) }) @@ -595,14 +595,14 @@ func TestRequestReplayable(t *testing.T) { t.Run("second, partial read", func(t *testing.T) { buf := make([]byte, 9) n, err := io.ReadFull(r, buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 9, n) assert.Equal(t, message[:9], string(buf)) }) t.Run("read rest", func(t *testing.T) { read, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, read, len(message)-9) // Continue from byte 9 assert.Equal(t, message[9:], string(read)) @@ -610,13 +610,13 @@ func TestRequestReplayable(t *testing.T) { t.Run("second read in full", func(t *testing.T) { read, err := req.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close request", func(t *testing.T) { err := req.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, req.data) assert.Nil(t, req.replay) }) @@ -630,7 +630,7 @@ func TestRequestReplayable(t *testing.T) { buf := make([]byte, 9) n, err := io.ReadFull(req.RawData(), buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 9, n) assert.Equal(t, message[:9], string(buf)) }) @@ -638,13 +638,13 @@ func TestRequestReplayable(t *testing.T) { t.Run("replay buffer has partial data", func(t *testing.T) { assert.Equal(t, 9, req.replay.Len()) read, err := io.ReadAll(bytes.NewReader(req.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message[:9], string(read)) }) t.Run("second read in full", func(t *testing.T) { read, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) @@ -656,21 +656,21 @@ func TestRequestReplayable(t *testing.T) { }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), req.replay.Len()) + assert.Len(t, message, req.replay.Len()) read, err := io.ReadAll(bytes.NewReader(req.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("third read in full", func(t *testing.T) { read, err := io.ReadAll(req.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close request", func(t *testing.T) { err := req.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, req.data) assert.Nil(t, req.replay) }) @@ -684,18 +684,18 @@ func TestRequestReplayable(t *testing.T) { pb, err := req.ProtoWithData() require.NoError(t, err) require.NotNil(t, pb) - require.NotNil(t, pb.Message) - require.NotNil(t, pb.Message.Data) - assert.Equal(t, message, string(pb.Message.Data.Value)) + require.NotNil(t, pb.GetMessage()) + require.NotNil(t, pb.GetMessage().GetData()) + assert.Equal(t, message, string(pb.GetMessage().GetData().GetValue())) }) t.Run("second ProtoWithData request", func(t *testing.T) { pb, err := req.ProtoWithData() require.NoError(t, err) require.NotNil(t, pb) - require.NotNil(t, pb.Message) - require.NotNil(t, pb.Message.Data) - assert.Equal(t, message, string(pb.Message.Data.Value)) + require.NotNil(t, pb.GetMessage()) + require.NotNil(t, pb.GetMessage().GetData()) + assert.Equal(t, message, string(pb.GetMessage().GetData().GetValue())) }) t.Run("close request", func(t *testing.T) { @@ -729,8 +729,8 @@ func TestDataTypeUrl(t *testing.T) { pd, err := req.ProtoWithData() require.NoError(t, err) require.NotNil(t, pd.GetMessage().GetData()) - assert.Equal(t, message, string(pd.Message.Data.Value)) - assert.Equal(t, typeURL, 
pd.Message.Data.TypeUrl) + assert.Equal(t, message, string(pd.GetMessage().GetData().GetValue())) + assert.Equal(t, typeURL, pd.GetMessage().GetData().GetTypeUrl()) // Content type should be the protobuf one assert.Equal(t, ProtobufContentType, req.ContentType()) @@ -750,8 +750,8 @@ func TestDataTypeUrl(t *testing.T) { pd, err := req.ProtoWithData() require.NoError(t, err) require.NotNil(t, pd.GetMessage().GetData()) - assert.Equal(t, message, string(pd.Message.Data.Value)) - assert.Equal(t, typeURL, pd.Message.Data.TypeUrl) + assert.Equal(t, message, string(pd.GetMessage().GetData().GetValue())) + assert.Equal(t, typeURL, pd.GetMessage().GetData().GetTypeUrl()) // Content type should be the protobuf one assert.Equal(t, ProtobufContentType, req.ContentType()) diff --git a/pkg/messaging/v1/invoke_method_response.go b/pkg/messaging/v1/invoke_method_response.go index 959ab96a201..b262e8ddfae 100644 --- a/pkg/messaging/v1/invoke_method_response.go +++ b/pkg/messaging/v1/invoke_method_response.go @@ -49,10 +49,10 @@ func NewInvokeMethodResponse(statusCode int32, statusMessage string, statusDetai // InternalInvokeResponse returns InvokeMethodResponse for InternalInvokeResponse pb to use the helpers. func InternalInvokeResponse(pb *internalv1pb.InternalInvokeResponse) (*InvokeMethodResponse, error) { rsp := &InvokeMethodResponse{r: pb} - if pb.Message == nil { + if pb.GetMessage() == nil { pb.Message = &commonv1pb.InvokeResponse{Data: nil} } - if pb.Headers == nil { + if pb.GetHeaders() == nil { pb.Headers = map[string]*internalv1pb.ListStringValue{} } @@ -144,7 +144,7 @@ func (imr *InvokeMethodResponse) Status() *internalv1pb.Status { if imr.r == nil { return nil } - return imr.r.Status + return imr.r.GetStatus() } // IsHTTPResponse returns true if response status code is http response status. @@ -154,7 +154,7 @@ func (imr *InvokeMethodResponse) IsHTTPResponse() bool { } // gRPC status code <= 15 - https://github.com/grpc/grpc/blob/master/doc/statuscodes.md // HTTP status code >= 100 - https://tools.ietf.org/html/rfc2616#section-10 - return imr.r.Status.Code >= 100 + return imr.r.GetStatus().GetCode() >= 100 } // Proto returns the internal InvokeMethodResponse Proto object. @@ -196,7 +196,7 @@ func (imr *InvokeMethodResponse) Headers() DaprInternalMetadata { if imr.r == nil { return nil } - return imr.r.Headers + return imr.r.GetHeaders() } // Trailers gets Trailers metadata. @@ -204,7 +204,7 @@ func (imr *InvokeMethodResponse) Trailers() DaprInternalMetadata { if imr.r == nil { return nil } - return imr.r.Trailers + return imr.r.GetTrailers() } // Message returns message field in InvokeMethodResponse. @@ -212,12 +212,12 @@ func (imr *InvokeMethodResponse) Message() *commonv1pb.InvokeResponse { if imr.r == nil { return nil } - return imr.r.Message + return imr.r.GetMessage() } // HasMessageData returns true if the message object contains a slice of data buffered. func (imr *InvokeMethodResponse) HasMessageData() bool { - m := imr.r.Message + m := imr.r.GetMessage() return len(m.GetData().GetValue()) > 0 } @@ -227,17 +227,17 @@ func (imr *InvokeMethodResponse) ResetMessageData() { return } - imr.r.Message.Data.Reset() + imr.r.GetMessage().GetData().Reset() } // ContentType returns the content type of the message. 
func (imr *InvokeMethodResponse) ContentType() string { - m := imr.r.Message + m := imr.r.GetMessage() if m == nil { return "" } - contentType := m.ContentType + contentType := m.GetContentType() // If there's a proto data and that has a type URL, or if we have a dataTypeUrl in the object, then the content type is the protobuf one if imr.dataTypeURL != "" || m.GetData().GetTypeUrl() != "" { @@ -252,7 +252,7 @@ func (imr *InvokeMethodResponse) RawData() (r io.Reader) { // If the message has a data property, use that if imr.HasMessageData() { // HasMessageData() guarantees that the `imr.r.Message` and `imr.r.Message.Data` is not nil - return bytes.NewReader(imr.r.Message.Data.Value) + return bytes.NewReader(imr.r.GetMessage().GetData().GetValue()) } return imr.replayableRequest.RawData() @@ -262,7 +262,7 @@ func (imr *InvokeMethodResponse) RawData() (r io.Reader) { func (imr *InvokeMethodResponse) RawDataFull() ([]byte, error) { // If the message has a data property, use that if imr.HasMessageData() { - return imr.r.Message.Data.Value, nil + return imr.r.GetMessage().GetData().GetValue(), nil } r := imr.RawData() diff --git a/pkg/messaging/v1/invoke_method_response_test.go b/pkg/messaging/v1/invoke_method_response_test.go index eb79b971212..83933373ab1 100644 --- a/pkg/messaging/v1/invoke_method_response_test.go +++ b/pkg/messaging/v1/invoke_method_response_test.go @@ -33,9 +33,9 @@ func TestInvocationResponse(t *testing.T) { resp := NewInvokeMethodResponse(0, "OK", nil) defer resp.Close() - assert.Equal(t, int32(0), resp.r.GetStatus().Code) - assert.Equal(t, "OK", resp.r.GetStatus().Message) - assert.NotNil(t, resp.r.Message) + assert.Equal(t, int32(0), resp.r.GetStatus().GetCode()) + assert.Equal(t, "OK", resp.r.GetStatus().GetMessage()) + assert.NotNil(t, resp.r.GetMessage()) } func TestInternalInvocationResponse(t *testing.T) { @@ -50,15 +50,15 @@ func TestInternalInvocationResponse(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() - assert.NotNil(t, ir.r.Message) - assert.Equal(t, int32(0), ir.r.Status.Code) - assert.Nil(t, ir.r.Message.Data) + assert.NotNil(t, ir.r.GetMessage()) + assert.Equal(t, int32(0), ir.r.GetStatus().GetCode()) + assert.Nil(t, ir.r.GetMessage().GetData()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) - assert.Len(t, bData, 0) + require.NoError(t, err) + assert.Empty(t, bData) }) t.Run("valid internal invoke response with data", func(t *testing.T) { @@ -72,16 +72,16 @@ func TestInternalInvocationResponse(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() - assert.NotNil(t, ir.r.Message) - assert.Equal(t, int32(0), ir.r.Status.Code) - require.NotNil(t, ir.r.Message.Data) - require.NotNil(t, ir.r.Message.Data.Value) - assert.Equal(t, []byte("test"), ir.r.Message.Data.Value) + assert.NotNil(t, ir.r.GetMessage()) + assert.Equal(t, int32(0), ir.r.GetStatus().GetCode()) + require.NotNil(t, ir.r.GetMessage().GetData()) + require.NotNil(t, ir.r.GetMessage().GetData().GetValue()) + assert.Equal(t, []byte("test"), ir.r.GetMessage().GetData().GetValue()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", string(bData)) }) @@ -92,10 +92,10 @@ func TestInternalInvocationResponse(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() - assert.NotNil(t, ir.r.Message) - assert.Nil(t, 
ir.r.Message.Data) + assert.NotNil(t, ir.r.GetMessage()) + assert.Nil(t, ir.r.GetMessage().GetData()) }) } @@ -106,8 +106,8 @@ func TestResponseData(t *testing.T) { WithContentType("application/json") defer resp.Close() bData, err := io.ReadAll(resp.RawData()) - assert.NoError(t, err) - contentType := resp.r.Message.ContentType + require.NoError(t, err) + contentType := resp.r.GetMessage().GetContentType() assert.Equal(t, "application/json", contentType) assert.Equal(t, "test", string(bData)) }) @@ -119,8 +119,8 @@ func TestResponseData(t *testing.T) { contentType := resp.ContentType() bData, err := io.ReadAll(resp.RawData()) - assert.NoError(t, err) - assert.Equal(t, "", resp.r.Message.ContentType) + require.NoError(t, err) + assert.Equal(t, "", resp.r.GetMessage().GetContentType()) assert.Equal(t, "", contentType) assert.Equal(t, "test", string(bData)) }) @@ -128,16 +128,16 @@ func TestResponseData(t *testing.T) { t.Run("typeurl is set but content_type is unset", func(t *testing.T) { s := &commonv1pb.StateItem{Key: "custom_key"} b, err := anypb.New(s) - assert.NoError(t, err) + require.NoError(t, err) resp := NewInvokeMethodResponse(0, "OK", nil) defer resp.Close() resp.r.Message.Data = b contentType := resp.ContentType() bData, err := io.ReadAll(resp.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, ProtobufContentType, contentType) - assert.Equal(t, b.Value, bData) + assert.Equal(t, b.GetValue(), bData) }) } @@ -149,7 +149,7 @@ func TestResponseRawData(t *testing.T) { r := req.RawData() bData, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, bData) }) @@ -161,11 +161,11 @@ func TestResponseRawData(t *testing.T) { r := req.RawData() bData, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "nel blu dipinto di blu", string(bData)) - _ = assert.Nil(t, req.Message().Data) || - assert.Len(t, req.Message().Data.Value, 0) + _ = assert.Nil(t, req.Message().GetData()) || + assert.Empty(t, req.Message().GetData().GetValue()) }) t.Run("data inside message has priority", func(t *testing.T) { @@ -180,11 +180,11 @@ func TestResponseRawData(t *testing.T) { r := req.RawData() bData, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, msg, string(bData)) - _ = assert.NotNil(t, req.Message().Data) && - assert.Equal(t, msg, string(req.Message().Data.Value)) + _ = assert.NotNil(t, req.Message().GetData()) && + assert.Equal(t, msg, string(req.Message().GetData().GetValue())) }) } @@ -194,7 +194,7 @@ func TestResponseRawDataFull(t *testing.T) { r: &internalv1pb.InternalInvokeResponse{}, } data, err := req.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, data) }) @@ -204,11 +204,11 @@ func TestResponseRawDataFull(t *testing.T) { defer req.Close() data, err := req.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "nel blu dipinto di blu", string(data)) - _ = assert.Nil(t, req.Message().Data) || - assert.Len(t, req.Message().Data.Value, 0) + _ = assert.Nil(t, req.Message().GetData()) || + assert.Empty(t, req.Message().GetData().GetValue()) }) t.Run("data inside message has priority", func(t *testing.T) { @@ -221,11 +221,11 @@ func TestResponseRawDataFull(t *testing.T) { req.Message().Data = &anypb.Any{Value: []byte(msg)} data, err := req.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, msg, string(data)) - _ = assert.NotNil(t, req.Message().Data) && - assert.Equal(t, msg, 
string(req.Message().Data.Value)) + _ = assert.NotNil(t, req.Message().GetData()) && + assert.Equal(t, msg, string(req.Message().GetData().GetValue())) }) } @@ -241,18 +241,18 @@ func TestResponseProto(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() req2 := ir.Proto() msg := req2.GetMessage() - assert.Equal(t, "application/json", msg.ContentType) - require.NotNil(t, msg.Data) - require.NotNil(t, msg.Data.Value) - assert.Equal(t, []byte("test"), msg.Data.Value) + assert.Equal(t, "application/json", msg.GetContentType()) + require.NotNil(t, msg.GetData()) + require.NotNil(t, msg.GetData().GetValue()) + assert.Equal(t, []byte("test"), msg.GetData().GetValue()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte("test"), bData) }) @@ -266,16 +266,16 @@ func TestResponseProto(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() ir.data = io.NopCloser(strings.NewReader("test")) req2 := ir.Proto() - assert.Equal(t, "application/json", req2.Message.ContentType) - assert.Nil(t, req2.Message.Data) + assert.Equal(t, "application/json", req2.GetMessage().GetContentType()) + assert.Nil(t, req2.GetMessage().GetData()) bData, err := io.ReadAll(ir.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []byte("test"), bData) }) } @@ -288,10 +288,10 @@ func TestResponseProtoWithData(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() _, err = ir.ProtoWithData() - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("byte slice", func(t *testing.T) { @@ -305,13 +305,13 @@ func TestResponseProtoWithData(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() req2, err := ir.ProtoWithData() - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, "application/json", req2.Message.ContentType) - assert.Equal(t, []byte("test"), req2.Message.Data.Value) + assert.Equal(t, "application/json", req2.GetMessage().GetContentType()) + assert.Equal(t, []byte("test"), req2.GetMessage().GetData().GetValue()) }) t.Run("stream", func(t *testing.T) { @@ -324,14 +324,14 @@ func TestResponseProtoWithData(t *testing.T) { } ir, err := InternalInvokeResponse(&pb) - assert.NoError(t, err) + require.NoError(t, err) defer ir.Close() ir.data = io.NopCloser(strings.NewReader("test")) req2, err := ir.ProtoWithData() - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, "application/json", req2.Message.ContentType) - assert.Equal(t, []byte("test"), req2.Message.Data.Value) + assert.Equal(t, "application/json", req2.GetMessage().GetContentType()) + assert.Equal(t, []byte("test"), req2.GetMessage().GetData().GetValue()) }) } @@ -346,10 +346,10 @@ func TestResponseHeader(t *testing.T) { defer imr.Close() mheader := imr.Headers() - assert.Equal(t, "val1", mheader["test1"].Values[0]) - assert.Equal(t, "val2", mheader["test1"].Values[1]) - assert.Equal(t, "val3", mheader["test2"].Values[0]) - assert.Equal(t, "val4", mheader["test2"].Values[1]) + assert.Equal(t, "val1", mheader["test1"].GetValues()[0]) + assert.Equal(t, "val2", mheader["test1"].GetValues()[1]) + assert.Equal(t, "val3", mheader["test2"].GetValues()[0]) + assert.Equal(t, "val4", mheader["test2"].GetValues()[1]) }) t.Run("HTTP headers", func(t *testing.T) { @@ -433,7 +433,7 @@ func 
TestResponseReplayable(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(res.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) @@ -441,19 +441,19 @@ func TestResponseReplayable(t *testing.T) { buf := make([]byte, 9) n, err := io.ReadFull(res.data, buf) assert.Equal(t, 0, n) - assert.ErrorIs(t, err, io.EOF) + require.ErrorIs(t, err, io.EOF) }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), res.replay.Len()) + assert.Len(t, message, res.replay.Len()) read, err := io.ReadAll(bytes.NewReader(res.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close response", func(t *testing.T) { err := res.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, res.data) assert.Nil(t, res.replay) }) @@ -465,7 +465,7 @@ func TestResponseReplayable(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(res.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) @@ -473,31 +473,31 @@ func TestResponseReplayable(t *testing.T) { buf := make([]byte, 9) n, err := io.ReadFull(res.data, buf) assert.Equal(t, 0, n) - assert.ErrorIs(t, err, io.EOF) + require.ErrorIs(t, err, io.EOF) }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), res.replay.Len()) + assert.Len(t, message, res.replay.Len()) read, err := io.ReadAll(bytes.NewReader(res.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("second read in full", func(t *testing.T) { read, err := io.ReadAll(res.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("third read in full", func(t *testing.T) { read, err := io.ReadAll(res.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close response", func(t *testing.T) { err := res.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, res.data) assert.Nil(t, res.replay) }) @@ -509,7 +509,7 @@ func TestResponseReplayable(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(res.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) @@ -517,14 +517,14 @@ func TestResponseReplayable(t *testing.T) { t.Run("second, partial read", func(t *testing.T) { buf := make([]byte, 9) n, err := io.ReadFull(r, buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 9, n) assert.Equal(t, message[:9], string(buf)) }) t.Run("read rest", func(t *testing.T) { read, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, read, len(message)-9) // Continue from byte 9 assert.Equal(t, message[9:], string(read)) @@ -532,13 +532,13 @@ func TestResponseReplayable(t *testing.T) { t.Run("second read in full", func(t *testing.T) { read, err := res.RawDataFull() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close response", func(t *testing.T) { err := res.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, res.data) assert.Nil(t, res.replay) }) @@ -552,7 +552,7 @@ func TestResponseReplayable(t *testing.T) { buf := make([]byte, 9) n, err := io.ReadFull(res.RawData(), buf) - assert.NoError(t, err) + require.NoError(t, err) 
assert.Equal(t, 9, n) assert.Equal(t, message[:9], string(buf)) }) @@ -560,13 +560,13 @@ func TestResponseReplayable(t *testing.T) { t.Run("replay buffer has partial data", func(t *testing.T) { assert.Equal(t, 9, res.replay.Len()) read, err := io.ReadAll(bytes.NewReader(res.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message[:9], string(read)) }) t.Run("second read in full", func(t *testing.T) { read, err := io.ReadAll(res.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) @@ -574,25 +574,25 @@ func TestResponseReplayable(t *testing.T) { buf := make([]byte, 9) n, err := io.ReadFull(res.data, buf) assert.Equal(t, 0, n) - assert.ErrorIs(t, err, io.EOF) + require.ErrorIs(t, err, io.EOF) }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), res.replay.Len()) + assert.Len(t, message, res.replay.Len()) read, err := io.ReadAll(bytes.NewReader(res.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("third read in full", func(t *testing.T) { read, err := io.ReadAll(res.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, string(read)) }) t.Run("close response", func(t *testing.T) { err := res.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, res.data) assert.Nil(t, res.replay) }) @@ -604,25 +604,25 @@ func TestResponseReplayable(t *testing.T) { t.Run("first ProtoWithData response", func(t *testing.T) { pb, err := res.ProtoWithData() - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, pb) - assert.NotNil(t, pb.Message) - assert.NotNil(t, pb.Message.Data) - assert.Equal(t, message, string(pb.Message.Data.Value)) + assert.NotNil(t, pb.GetMessage()) + assert.NotNil(t, pb.GetMessage().GetData()) + assert.Equal(t, message, string(pb.GetMessage().GetData().GetValue())) }) t.Run("second ProtoWithData response", func(t *testing.T) { pb, err := res.ProtoWithData() - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, pb) - assert.NotNil(t, pb.Message) - assert.NotNil(t, pb.Message.Data) - assert.Equal(t, message, string(pb.Message.Data.Value)) + assert.NotNil(t, pb.GetMessage()) + assert.NotNil(t, pb.GetMessage().GetData()) + assert.Equal(t, message, string(pb.GetMessage().GetData().GetValue())) }) t.Run("close response", func(t *testing.T) { err := res.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, res.data) assert.Nil(t, res.replay) }) diff --git a/pkg/messaging/v1/replayable_request_test.go b/pkg/messaging/v1/replayable_request_test.go index 61de0112588..16776912fc2 100644 --- a/pkg/messaging/v1/replayable_request_test.go +++ b/pkg/messaging/v1/replayable_request_test.go @@ -44,7 +44,7 @@ func TestReplayableRequest(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) @@ -56,15 +56,15 @@ func TestReplayableRequest(t *testing.T) { }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), rr.replay.Len()) + assert.Len(t, message, rr.replay.Len()) read, err := io.ReadAll(bytes.NewReader(rr.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) t.Run("close rr", func(t *testing.T) { err := rr.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, rr.data) assert.Nil(t, rr.replay) }) @@ 
-76,7 +76,7 @@ func TestReplayableRequest(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) @@ -88,27 +88,27 @@ func TestReplayableRequest(t *testing.T) { }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), rr.replay.Len()) + assert.Len(t, message, rr.replay.Len()) read, err := io.ReadAll(bytes.NewReader(rr.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) t.Run("second read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) t.Run("third read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) t.Run("close rr", func(t *testing.T) { err := rr.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, rr.data) assert.Nil(t, rr.replay) }) @@ -120,7 +120,7 @@ func TestReplayableRequest(t *testing.T) { t.Run("first read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) @@ -132,14 +132,14 @@ func TestReplayableRequest(t *testing.T) { t.Run("second, partial read", func(t *testing.T) { buf := make([]byte, partial) n, err := io.ReadFull(r, buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, partial, n) assert.Equal(t, message[:partial], buf) }) t.Run("read rest", func(t *testing.T) { read, err := io.ReadAll(r) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, read, len(message)-partial) // Continue from byte "partial" assert.Equal(t, message[partial:], read) @@ -147,13 +147,13 @@ func TestReplayableRequest(t *testing.T) { t.Run("second read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) t.Run("close rr", func(t *testing.T) { err := rr.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, rr.data) assert.Nil(t, rr.replay) }) @@ -171,7 +171,7 @@ func TestReplayableRequest(t *testing.T) { buf := make([]byte, partial) n, err := io.ReadFull(rr.RawData(), buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, partial, n) assert.Equal(t, message[:partial], buf) }) @@ -179,13 +179,13 @@ func TestReplayableRequest(t *testing.T) { t.Run("replay buffer has partial data", func(t *testing.T) { assert.Equal(t, partial, rr.replay.Len()) read, err := io.ReadAll(bytes.NewReader(rr.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message[:partial], read) }) t.Run("second read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) @@ -197,21 +197,21 @@ func TestReplayableRequest(t *testing.T) { }) t.Run("replay buffer is full", func(t *testing.T) { - assert.Equal(t, len(message), rr.replay.Len()) + assert.Len(t, message, rr.replay.Len()) read, err := io.ReadAll(bytes.NewReader(rr.replay.Bytes())) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) t.Run("third read in full", func(t *testing.T) { read, err := io.ReadAll(rr.RawData()) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, message, read) }) t.Run("close rr", func(t 
*testing.T) { err := rr.Close() - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, rr.data) assert.Nil(t, rr.replay) }) diff --git a/pkg/messaging/v1/util.go b/pkg/messaging/v1/util.go index 35318227fdf..54efddd1280 100644 --- a/pkg/messaging/v1/util.go +++ b/pkg/messaging/v1/util.go @@ -125,12 +125,12 @@ func httpHeadersToInternalMetadata(header http.Header) DaprInternalMetadata { internalMD := make(DaprInternalMetadata, len(header)) for key, val := range header { // Note: HTTP headers can never be binary (only gRPC supports binary headers) - if internalMD[key] == nil || len(internalMD[key].Values) == 0 { + if internalMD[key] == nil || len(internalMD[key].GetValues()) == 0 { internalMD[key] = &internalv1pb.ListStringValue{ Values: val, } } else { - internalMD[key].Values = append(internalMD[key].Values, val...) + internalMD[key].Values = append(internalMD[key].GetValues(), val...) } } return internalMD @@ -148,12 +148,12 @@ func fasthttpHeadersToInternalMetadata(header fasthttpHeaders) DaprInternalMetad header.VisitAll(func(key []byte, value []byte) { // Note: fasthttp headers can never be binary (only gRPC supports binary headers) keyStr := string(key) - if internalMD[keyStr] == nil || len(internalMD[keyStr].Values) == 0 { + if internalMD[keyStr] == nil || len(internalMD[keyStr].GetValues()) == 0 { internalMD[keyStr] = &internalv1pb.ListStringValue{ Values: []string{string(value)}, } } else { - internalMD[keyStr].Values = append(internalMD[keyStr].Values, string(value)) + internalMD[keyStr].Values = append(internalMD[keyStr].GetValues(), string(value)) } }) return internalMD @@ -212,13 +212,13 @@ func InternalMetadataToGrpcMetadata(ctx context.Context, internalMD DaprInternal // get both the trace headers for HTTP/GRPC and continue switch keyName { case traceparentHeader: - traceparentValue = listVal.Values[0] + traceparentValue = listVal.GetValues()[0] continue case tracestateHeader: - tracestateValue = listVal.Values[0] + tracestateValue = listVal.GetValues()[0] continue case tracebinMetadata: - grpctracebinValue = listVal.Values[0] + grpctracebinValue = listVal.GetValues()[0] continue case DestinationIDHeader: continue @@ -230,14 +230,14 @@ func InternalMetadataToGrpcMetadata(ctx context.Context, internalMD DaprInternal if strings.HasSuffix(k, gRPCBinaryMetadataSuffix) { // decoded base64 encoded key binary - for _, val := range listVal.Values { + for _, val := range listVal.GetValues() { decoded, err := base64.StdEncoding.DecodeString(val) if err == nil { md.Append(keyName, string(decoded)) } } } else { - md.Append(keyName, listVal.Values...) + md.Append(keyName, listVal.GetValues()...) 
} } @@ -254,7 +254,7 @@ func InternalMetadataToGrpcMetadata(ctx context.Context, internalMD DaprInternal func IsGRPCProtocol(internalMD DaprInternalMetadata) bool { originContentType := "" if val, ok := internalMD[ContentTypeHeader]; ok { - originContentType = val.Values[0] + originContentType = val.GetValues()[0] } return strings.HasPrefix(originContentType, GRPCContentType) } @@ -275,7 +275,7 @@ func ReservedGRPCMetadataToDaprPrefixHeader(key string) string { func InternalMetadataToHTTPHeader(ctx context.Context, internalMD DaprInternalMetadata, setHeader func(string, string)) { var traceparentValue, tracestateValue, grpctracebinValue string for k, listVal := range internalMD { - if len(listVal.Values) == 0 { + if len(listVal.GetValues()) == 0 { continue } @@ -283,13 +283,13 @@ func InternalMetadataToHTTPHeader(ctx context.Context, internalMD DaprInternalMe // get both the trace headers for HTTP/GRPC and continue switch keyName { case traceparentHeader: - traceparentValue = listVal.Values[0] + traceparentValue = listVal.GetValues()[0] continue case tracestateHeader: - tracestateValue = listVal.Values[0] + tracestateValue = listVal.GetValues()[0] continue case tracebinMetadata: - grpctracebinValue = listVal.Values[0] + grpctracebinValue = listVal.GetValues()[0] continue case DestinationIDHeader: continue @@ -299,7 +299,7 @@ func InternalMetadataToHTTPHeader(ctx context.Context, internalMD DaprInternalMe continue } - for _, v := range listVal.Values { + for _, v := range listVal.GetValues() { setHeader(ReservedGRPCMetadataToDaprPrefixHeader(keyName), v) } } diff --git a/pkg/messaging/v1/util_test.go b/pkg/messaging/v1/util_test.go index 9183163bfcc..119e453a80a 100644 --- a/pkg/messaging/v1/util_test.go +++ b/pkg/messaging/v1/util_test.go @@ -71,13 +71,13 @@ func TestGrpcMetadataToInternalMetadata(t *testing.T) { testMD.Append("multikey", "ciao", "mamma") internalMD := metadataToInternalMetadata(testMD) - require.Equal(t, 1, len(internalMD["key"].GetValues())) + require.Len(t, internalMD["key"].GetValues(), 1) assert.Equal(t, "key value", internalMD["key"].GetValues()[0]) - require.Equal(t, 1, len(internalMD["key-bin"].GetValues())) + require.Len(t, internalMD["key-bin"].GetValues(), 1) assert.Equal(t, base64.StdEncoding.EncodeToString(keyBinValue), internalMD["key-bin"].GetValues()[0], "binary metadata must be saved") - require.Equal(t, 2, len(internalMD["multikey"].GetValues())) + require.Len(t, internalMD["multikey"].GetValues(), 2) assert.Equal(t, []string{"ciao", "mamma"}, internalMD["multikey"].GetValues()) } @@ -243,7 +243,7 @@ func TestErrorFromHTTPResponseCode(t *testing.T) { err := ErrorFromHTTPResponseCode(200, "OK") // assert - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("Created", func(t *testing.T) { @@ -251,7 +251,7 @@ func TestErrorFromHTTPResponseCode(t *testing.T) { err := ErrorFromHTTPResponseCode(201, "Created") // assert - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("NotFound", func(t *testing.T) { @@ -291,7 +291,7 @@ func TestErrorFromHTTPResponseCode(t *testing.T) { // assert s, _ := status.FromError(err) errInfo := (s.Details()[0]).(*epb.ErrorInfo) - assert.Equal(t, 63, len(errInfo.GetMetadata()[errorInfoHTTPErrorMetadata])) + assert.Len(t, errInfo.GetMetadata()[errorInfoHTTPErrorMetadata], 63) }) } @@ -307,9 +307,9 @@ func TestErrorFromInternalStatus(t *testing.T) { ) internal := &internalv1pb.Status{ - Code: expected.Proto().Code, - Message: expected.Proto().Message, - Details: expected.Proto().Details, + Code: 
expected.Proto().GetCode(), + Message: expected.Proto().GetMessage(), + Details: expected.Proto().GetDetails(), } expected.Message() @@ -334,7 +334,7 @@ func TestProtobufToJSON(t *testing.T) { } jsonBody, err := ProtobufToJSON(tpb) - assert.NoError(t, err) + require.NoError(t, err) t.Log(string(jsonBody)) // protojson produces different indentation space based on OS @@ -383,11 +383,11 @@ func TestFasthttpHeadersToInternalMetadata(t *testing.T) { require.NotEmpty(t, imd) require.NotEmpty(t, imd["Foo"]) - require.NotEmpty(t, imd["Foo"].Values) - assert.Equal(t, []string{"test"}, imd["Foo"].Values) + require.NotEmpty(t, imd["Foo"].GetValues()) + assert.Equal(t, []string{"test"}, imd["Foo"].GetValues()) require.NotEmpty(t, imd["Bar"]) - require.NotEmpty(t, imd["Bar"].Values) - assert.Equal(t, []string{"test2", "test3"}, imd["Bar"].Values) + require.NotEmpty(t, imd["Bar"].GetValues()) + assert.Equal(t, []string{"test2", "test3"}, imd["Bar"].GetValues()) } func TestHttpHeadersToInternalMetadata(t *testing.T) { @@ -400,9 +400,9 @@ func TestHttpHeadersToInternalMetadata(t *testing.T) { require.NotEmpty(t, imd) require.NotEmpty(t, imd["Foo"]) - require.NotEmpty(t, imd["Foo"].Values) - assert.Equal(t, []string{"test"}, imd["Foo"].Values) + require.NotEmpty(t, imd["Foo"].GetValues()) + assert.Equal(t, []string{"test"}, imd["Foo"].GetValues()) require.NotEmpty(t, imd["Bar"]) - require.NotEmpty(t, imd["Bar"].Values) - assert.Equal(t, []string{"test2", "test3"}, imd["Bar"].Values) + require.NotEmpty(t, imd["Bar"].GetValues()) + assert.Equal(t, []string{"test2", "test3"}, imd["Bar"].GetValues()) } diff --git a/pkg/metrics/exporter_test.go b/pkg/metrics/exporter_test.go index ab2eae895d6..03e7697ca42 100644 --- a/pkg/metrics/exporter_test.go +++ b/pkg/metrics/exporter_test.go @@ -19,6 +19,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/dapr/kit/logger" ) @@ -40,7 +41,7 @@ func TestMetricsExporter(t *testing.T) { logger: logger, }, } - assert.Error(t, e.startMetricServer(context.Background())) + require.Error(t, e.startMetricServer(context.Background())) }) t.Run("skip starting metric server but wait for context cancellation", func(t *testing.T) { @@ -56,7 +57,7 @@ func TestMetricsExporter(t *testing.T) { select { case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) case <-time.After(time.Second): t.Error("expected metrics Run() to return in time when context is cancelled") } diff --git a/pkg/nethttpadaptor/nethttpadaptor_test.go b/pkg/nethttpadaptor/nethttpadaptor_test.go index aa23622754d..0f63adbd187 100644 --- a/pkg/nethttpadaptor/nethttpadaptor_test.go +++ b/pkg/nethttpadaptor/nethttpadaptor_test.go @@ -317,7 +317,7 @@ func TestNewNetHTTPHandlerFuncRequests(t *testing.T) { }, func(t *testing.T) func(ctx *fasthttp.RequestCtx) { return func(ctx *fasthttp.RequestCtx) { - assert.Equal(t, 0, len(ctx.Request.Body())) + assert.Empty(t, ctx.Request.Body()) } }, }, diff --git a/pkg/operator/api/api.go b/pkg/operator/api/api.go index d806e77dab7..a5bedbdc6b0 100644 --- a/pkg/operator/api/api.go +++ b/pkg/operator/api/api.go @@ -166,7 +166,7 @@ func (a *apiServer) Ready(ctx context.Context) error { // GetConfiguration returns a Dapr configuration. 
func (a *apiServer) GetConfiguration(ctx context.Context, in *operatorv1pb.GetConfigurationRequest) (*operatorv1pb.GetConfigurationResponse, error) { - key := types.NamespacedName{Namespace: in.Namespace, Name: in.Name} + key := types.NamespacedName{Namespace: in.GetNamespace(), Name: in.GetName()} var config configurationapi.Configuration if err := a.Client.Get(ctx, key, &config); err != nil { return nil, fmt.Errorf("error getting configuration: %w", err) @@ -184,7 +184,7 @@ func (a *apiServer) GetConfiguration(ctx context.Context, in *operatorv1pb.GetCo func (a *apiServer) ListComponents(ctx context.Context, in *operatorv1pb.ListComponentsRequest) (*operatorv1pb.ListComponentResponse, error) { var components componentsapi.ComponentList if err := a.Client.List(ctx, &components, &client.ListOptions{ - Namespace: in.Namespace, + Namespace: in.GetNamespace(), }); err != nil { return nil, fmt.Errorf("error getting components: %w", err) } @@ -193,18 +193,18 @@ func (a *apiServer) ListComponents(ctx context.Context, in *operatorv1pb.ListCom } for i := range components.Items { c := components.Items[i] // Make a copy since we will refer to this as a reference in this loop. - err := processComponentSecrets(ctx, &c, in.Namespace, a.Client) + err := processComponentSecrets(ctx, &c, in.GetNamespace(), a.Client) if err != nil { - log.Warnf("error processing component %s secrets from pod %s/%s: %s", c.Name, in.Namespace, in.PodName, err) + log.Warnf("error processing component %s secrets from pod %s/%s: %s", c.Name, in.GetNamespace(), in.GetPodName(), err) return &operatorv1pb.ListComponentResponse{}, err } b, err := json.Marshal(&c) if err != nil { - log.Warnf("error marshalling component %s from pod %s/%s: %s", c.Name, in.Namespace, in.PodName, err) + log.Warnf("error marshalling component %s from pod %s/%s: %s", c.Name, in.GetNamespace(), in.GetPodName(), err) continue } - resp.Components = append(resp.Components, b) + resp.Components = append(resp.GetComponents(), b) } return resp, nil } @@ -340,7 +340,7 @@ func (a *apiServer) ListSubscriptionsV2(ctx context.Context, in *operatorv1pb.Li // Only the latest/storage version needs to be returned. var subsV2alpha1 subscriptionsapiV2alpha1.SubscriptionList if err := a.Client.List(ctx, &subsV2alpha1, &client.ListOptions{ - Namespace: in.Namespace, + Namespace: in.GetNamespace(), }); err != nil { return nil, fmt.Errorf("error getting subscriptions: %w", err) } @@ -351,10 +351,10 @@ func (a *apiServer) ListSubscriptionsV2(ctx context.Context, in *operatorv1pb.Li } b, err := json.Marshal(&s) if err != nil { - log.Warnf("error marshalling subscription for pod %s/%s: %s", in.Namespace, in.PodName, err) + log.Warnf("error marshalling subscription for pod %s/%s: %s", in.GetNamespace(), in.GetPodName(), err) continue } - resp.Subscriptions = append(resp.Subscriptions, b) + resp.Subscriptions = append(resp.GetSubscriptions(), b) } return resp, nil @@ -362,7 +362,7 @@ func (a *apiServer) ListSubscriptionsV2(ctx context.Context, in *operatorv1pb.Li // GetResiliency returns a specified resiliency object. 
func (a *apiServer) GetResiliency(ctx context.Context, in *operatorv1pb.GetResiliencyRequest) (*operatorv1pb.GetResiliencyResponse, error) { - key := types.NamespacedName{Namespace: in.Namespace, Name: in.Name} + key := types.NamespacedName{Namespace: in.GetNamespace(), Name: in.GetName()} var resiliencyConfig resiliencyapi.Resiliency if err := a.Client.Get(ctx, key, &resiliencyConfig); err != nil { return nil, fmt.Errorf("error getting resiliency: %w", err) @@ -384,7 +384,7 @@ func (a *apiServer) ListResiliency(ctx context.Context, in *operatorv1pb.ListRes var resiliencies resiliencyapi.ResiliencyList if err := a.Client.List(ctx, &resiliencies, &client.ListOptions{ - Namespace: in.Namespace, + Namespace: in.GetNamespace(), }); err != nil { return nil, fmt.Errorf("error listing resiliencies: %w", err) } @@ -395,7 +395,7 @@ func (a *apiServer) ListResiliency(ctx context.Context, in *operatorv1pb.ListRes log.Warnf("Error unmarshalling resiliency: %s", err) continue } - resp.Resiliencies = append(resp.Resiliencies, b) + resp.Resiliencies = append(resp.GetResiliencies(), b) } return resp, nil @@ -422,19 +422,19 @@ func (a *apiServer) ComponentUpdate(in *operatorv1pb.ComponentUpdateRequest, srv }() updateComponentFunc := func(ctx context.Context, c *componentsapi.Component) { - if c.Namespace != in.Namespace { + if c.Namespace != in.GetNamespace() { return } - err := processComponentSecrets(ctx, c, in.Namespace, a.Client) + err := processComponentSecrets(ctx, c, in.GetNamespace(), a.Client) if err != nil { - log.Warnf("error processing component %s secrets from pod %s/%s: %s", c.Name, in.Namespace, in.PodName, err) + log.Warnf("error processing component %s secrets from pod %s/%s: %s", c.Name, in.GetNamespace(), in.GetPodName(), err) return } b, err := json.Marshal(&c) if err != nil { - log.Warnf("error serializing component %s (%s) from pod %s/%s: %s", c.GetName(), c.Spec.Type, in.Namespace, in.PodName, err) + log.Warnf("error serializing component %s (%s) from pod %s/%s: %s", c.GetName(), c.Spec.Type, in.GetNamespace(), in.GetPodName(), err) return } @@ -442,11 +442,11 @@ func (a *apiServer) ComponentUpdate(in *operatorv1pb.ComponentUpdateRequest, srv Component: b, }) if err != nil { - log.Warnf("error updating sidecar with component %s (%s) from pod %s/%s: %s", c.GetName(), c.Spec.Type, in.Namespace, in.PodName, err) + log.Warnf("error updating sidecar with component %s (%s) from pod %s/%s: %s", c.GetName(), c.Spec.Type, in.GetNamespace(), in.GetPodName(), err) return } - log.Infof("updated sidecar with component %s (%s) from pod %s/%s", c.GetName(), c.Spec.Type, in.Namespace, in.PodName) + log.Infof("updated sidecar with component %s (%s) from pod %s/%s", c.GetName(), c.Spec.Type, in.GetNamespace(), in.GetPodName()) } var wg sync.WaitGroup @@ -470,7 +470,7 @@ func (a *apiServer) ComponentUpdate(in *operatorv1pb.ComponentUpdateRequest, srv // GetHTTPEndpoint returns a specified http endpoint object. 
func (a *apiServer) GetHTTPEndpoint(ctx context.Context, in *operatorv1pb.GetResiliencyRequest) (*operatorv1pb.GetHTTPEndpointResponse, error) { - key := types.NamespacedName{Namespace: in.Namespace, Name: in.Name} + key := types.NamespacedName{Namespace: in.GetNamespace(), Name: in.GetName()} var endpointConfig httpendpointsapi.HTTPEndpoint if err := a.Client.Get(ctx, key, &endpointConfig); err != nil { return nil, fmt.Errorf("error getting http endpoint: %w", err) @@ -492,7 +492,7 @@ func (a *apiServer) ListHTTPEndpoints(ctx context.Context, in *operatorv1pb.List var endpoints httpendpointsapi.HTTPEndpointList if err := a.Client.List(ctx, &endpoints, &client.ListOptions{ - Namespace: in.Namespace, + Namespace: in.GetNamespace(), }); err != nil { return nil, fmt.Errorf("error listing http endpoints: %w", err) } @@ -510,7 +510,7 @@ func (a *apiServer) ListHTTPEndpoints(ctx context.Context, in *operatorv1pb.List log.Warnf("Error unmarshalling http endpoints: %s", err) continue } - resp.HttpEndpoints = append(resp.HttpEndpoints, b) + resp.HttpEndpoints = append(resp.GetHttpEndpoints(), b) } return resp, nil @@ -537,18 +537,18 @@ func (a *apiServer) HTTPEndpointUpdate(in *operatorv1pb.HTTPEndpointUpdateReques }() updateHTTPEndpointFunc := func(ctx context.Context, e *httpendpointsapi.HTTPEndpoint) { - if e.Namespace != in.Namespace { + if e.Namespace != in.GetNamespace() { return } - err := processHTTPEndpointSecrets(ctx, e, in.Namespace, a.Client) + err := processHTTPEndpointSecrets(ctx, e, in.GetNamespace(), a.Client) if err != nil { - log.Warnf("error processing http endpoint %s secrets from pod %s/%s: %s", e.Name, in.Namespace, in.PodName, err) + log.Warnf("error processing http endpoint %s secrets from pod %s/%s: %s", e.Name, in.GetNamespace(), in.GetPodName(), err) return } b, err := json.Marshal(&e) if err != nil { - log.Warnf("error serializing http endpoint %s from pod %s/%s: %s", e.GetName(), in.Namespace, in.PodName, err) + log.Warnf("error serializing http endpoint %s from pod %s/%s: %s", e.GetName(), in.GetNamespace(), in.GetPodName(), err) return } @@ -556,11 +556,11 @@ func (a *apiServer) HTTPEndpointUpdate(in *operatorv1pb.HTTPEndpointUpdateReques HttpEndpoints: b, }) if err != nil { - log.Warnf("error updating sidecar with http endpoint %s from pod %s/%s: %s", e.GetName(), in.Namespace, in.PodName, err) + log.Warnf("error updating sidecar with http endpoint %s from pod %s/%s: %s", e.GetName(), in.GetNamespace(), in.GetPodName(), err) return } - log.Infof("updated sidecar with http endpoint %s from pod %s/%s", e.GetName(), in.Namespace, in.PodName) + log.Infof("updated sidecar with http endpoint %s from pod %s/%s", e.GetName(), in.GetNamespace(), in.GetPodName()) } var wg sync.WaitGroup diff --git a/pkg/operator/api/api_test.go b/pkg/operator/api/api_test.go index 27296fa1b21..a26fe88681a 100644 --- a/pkg/operator/api/api_test.go +++ b/pkg/operator/api/api_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -86,7 +87,7 @@ func TestProcessComponentSecrets(t *testing.T) { } err := processComponentSecrets(context.Background(), &c, "default", nil) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("secret ref exists, kubernetes secret store, secret extracted", func(t *testing.T) { @@ -109,10 +110,10 @@ func TestProcessComponentSecrets(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - 
assert.NoError(t, err) + require.NoError(t, err) err = corev1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) client := fake.NewClientBuilder(). WithScheme(s). @@ -128,7 +129,7 @@ func TestProcessComponentSecrets(t *testing.T) { Build() err = processComponentSecrets(context.Background(), &c, "default", client) - assert.NoError(t, err) + require.NoError(t, err) enc := base64.StdEncoding.EncodeToString([]byte("value1")) jsonEnc, _ := json.Marshal(enc) @@ -156,10 +157,10 @@ func TestProcessComponentSecrets(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = corev1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) client := fake.NewClientBuilder(). WithScheme(s). @@ -175,7 +176,7 @@ func TestProcessComponentSecrets(t *testing.T) { Build() err = processComponentSecrets(context.Background(), &c, "default", client) - assert.NoError(t, err) + require.NoError(t, err) enc := base64.StdEncoding.EncodeToString([]byte("value1")) jsonEnc, _ := json.Marshal(enc) @@ -195,10 +196,10 @@ func TestComponentUpdate(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = corev1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) client := fake.NewClientBuilder(). WithScheme(s).Build() @@ -222,7 +223,7 @@ func TestComponentUpdate(t *testing.T) { }() // Start sidecar update loop - assert.NoError(t, api.ComponentUpdate(&operatorv1pb.ComponentUpdateRequest{ + require.NoError(t, api.ComponentUpdate(&operatorv1pb.ComponentUpdateRequest{ Namespace: "ns2", }, mockSidecar)) @@ -239,10 +240,10 @@ func TestComponentUpdate(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = corev1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) client := fake.NewClientBuilder(). WithScheme(s).Build() @@ -284,10 +285,10 @@ func TestHTTPEndpointUpdate(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = corev1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) client := fake.NewClientBuilder(). 
WithScheme(s).Build() @@ -311,7 +312,7 @@ func TestHTTPEndpointUpdate(t *testing.T) { }() // Start sidecar update loop - assert.NoError(t, api.HTTPEndpointUpdate(&operatorv1pb.HTTPEndpointUpdateRequest{ + require.NoError(t, api.HTTPEndpointUpdate(&operatorv1pb.HTTPEndpointUpdateRequest{ Namespace: "ns2", }, mockSidecar)) @@ -335,7 +336,7 @@ func TestHTTPEndpointUpdate(t *testing.T) { }() // Start sidecar update loop - assert.NoError(t, api.HTTPEndpointUpdate(&operatorv1pb.HTTPEndpointUpdateRequest{ + require.NoError(t, api.HTTPEndpointUpdate(&operatorv1pb.HTTPEndpointUpdateRequest{ Namespace: "ns1", }, mockSidecar)) @@ -347,10 +348,10 @@ func TestListsNamespaced(t *testing.T) { t.Run("list components namespace scoping", func(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = componentsapi.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) av, kind := componentsapi.SchemeGroupVersion.WithKind("Component").ToAPIVersionAndKind() typeMeta := metav1.TypeMeta{ @@ -380,11 +381,11 @@ func TestListsNamespaced(t *testing.T) { PodName: "foo", Namespace: "namespace-a", }) - assert.NoError(t, err) - assert.Equal(t, 1, len(res.GetComponents())) + require.NoError(t, err) + assert.Len(t, res.GetComponents(), 1) var sub resiliencyapi.Resiliency - assert.NoError(t, yaml.Unmarshal(res.GetComponents()[0], &sub)) + require.NoError(t, yaml.Unmarshal(res.GetComponents()[0], &sub)) assert.Equal(t, "obj1", sub.Name) assert.Equal(t, "namespace-a", sub.Namespace) @@ -393,16 +394,16 @@ func TestListsNamespaced(t *testing.T) { PodName: "foo", Namespace: "namespace-c", }) - assert.Nil(t, err) - assert.Equal(t, 0, len(res.GetComponents())) + require.NoError(t, err) + assert.Empty(t, res.GetComponents()) }) t.Run("list subscriptions namespace scoping", func(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = subscriptionsapiV2alpha1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) av, kind := subscriptionsapiV2alpha1.SchemeGroupVersion.WithKind("Subscription").ToAPIVersionAndKind() typeMeta := metav1.TypeMeta{ @@ -433,12 +434,12 @@ func TestListsNamespaced(t *testing.T) { Namespace: "namespace-a", }) - assert.Nil(t, err) - assert.Equal(t, 1, len(res.GetSubscriptions())) + require.NoError(t, err) + assert.Len(t, res.GetSubscriptions(), 1) var sub subscriptionsapiV2alpha1.Subscription err = yaml.Unmarshal(res.GetSubscriptions()[0], &sub) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, "sub1", sub.Name) assert.Equal(t, "namespace-a", sub.Namespace) @@ -447,16 +448,16 @@ func TestListsNamespaced(t *testing.T) { PodName: "baz", Namespace: "namespace-c", }) - assert.Nil(t, err) - assert.Equal(t, 0, len(res.GetSubscriptions())) + require.NoError(t, err) + assert.Empty(t, res.GetSubscriptions()) }) t.Run("list resiliencies namespace scoping", func(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = resiliencyapi.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) av, kind := resiliencyapi.SchemeGroupVersion.WithKind("Resiliency").ToAPIVersionAndKind() typeMeta := metav1.TypeMeta{ @@ -486,12 +487,12 @@ func TestListsNamespaced(t *testing.T) { Namespace: "namespace-a", }) - assert.Nil(t, err) - assert.Equal(t, 1, len(res.GetResiliencies())) + require.NoError(t, err) + assert.Len(t, res.GetResiliencies(), 1) var sub resiliencyapi.Resiliency err = 
yaml.Unmarshal(res.GetResiliencies()[0], &sub) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, "obj1", sub.Name) assert.Equal(t, "namespace-a", sub.Namespace) @@ -499,16 +500,16 @@ func TestListsNamespaced(t *testing.T) { res, err = api.ListResiliency(context.TODO(), &operatorv1pb.ListResiliencyRequest{ Namespace: "namespace-c", }) - assert.Nil(t, err) - assert.Equal(t, 0, len(res.GetResiliencies())) + require.NoError(t, err) + assert.Empty(t, res.GetResiliencies()) }) t.Run("list http endpoints namespace scoping", func(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = httpendpointapi.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) av, kind := httpendpointapi.SchemeGroupVersion.WithKind("HTTPEndpoint").ToAPIVersionAndKind() typeMeta := metav1.TypeMeta{ @@ -538,12 +539,12 @@ func TestListsNamespaced(t *testing.T) { Namespace: "namespace-a", }) - assert.Nil(t, err) - assert.Equal(t, 1, len(res.GetHttpEndpoints())) + require.NoError(t, err) + assert.Len(t, res.GetHttpEndpoints(), 1) var endpoint httpendpointapi.HTTPEndpoint err = yaml.Unmarshal(res.GetHttpEndpoints()[0], &endpoint) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, "obj1", endpoint.Name) assert.Equal(t, "namespace-a", endpoint.Namespace) @@ -551,8 +552,8 @@ func TestListsNamespaced(t *testing.T) { res, err = api.ListHTTPEndpoints(context.TODO(), &operatorv1pb.ListHTTPEndpointsRequest{ Namespace: "namespace-c", }) - assert.Nil(t, err) - assert.Equal(t, 0, len(res.GetHttpEndpoints())) + require.NoError(t, err) + assert.Empty(t, res.GetHttpEndpoints()) }) } @@ -576,17 +577,17 @@ func TestProcessHTTPEndpointSecrets(t *testing.T) { } t.Run("secret ref exists, not kubernetes secret store, no error", func(t *testing.T) { err := processHTTPEndpointSecrets(context.Background(), &e, "default", nil) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("secret ref exists, kubernetes secret store, secret extracted", func(t *testing.T) { e.Auth.SecretStore = kubernetesSecretStore s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = corev1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) client := fake.NewClientBuilder(). WithScheme(s). @@ -600,10 +601,10 @@ func TestProcessHTTPEndpointSecrets(t *testing.T) { }, }). Build() - assert.NoError(t, processHTTPEndpointSecrets(context.Background(), &e, "default", client)) + require.NoError(t, processHTTPEndpointSecrets(context.Background(), &e, "default", client)) enc := base64.StdEncoding.EncodeToString([]byte("value1")) jsonEnc, err := json.Marshal(enc) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, jsonEnc, e.Spec.Headers[0].Value.Raw) }) @@ -611,10 +612,10 @@ func TestProcessHTTPEndpointSecrets(t *testing.T) { e.Auth.SecretStore = "" s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) err = corev1.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) client := fake.NewClientBuilder(). WithScheme(s). @@ -629,11 +630,11 @@ func TestProcessHTTPEndpointSecrets(t *testing.T) { }). 
Build() - assert.NoError(t, processHTTPEndpointSecrets(context.Background(), &e, "default", client)) + require.NoError(t, processHTTPEndpointSecrets(context.Background(), &e, "default", client)) enc := base64.StdEncoding.EncodeToString([]byte("value1")) jsonEnc, err := json.Marshal(enc) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, jsonEnc, e.Spec.Headers[0].Value.Raw) }) } diff --git a/pkg/operator/handlers/dapr_handler_test.go b/pkg/operator/handlers/dapr_handler_test.go index c6d1d105a4a..618931fc73c 100644 --- a/pkg/operator/handlers/dapr_handler_test.go +++ b/pkg/operator/handlers/dapr_handler_test.go @@ -23,7 +23,7 @@ import ( func TestNewDaprHandler(t *testing.T) { d := getTestDaprHandler() - assert.True(t, d != nil) + assert.NotNil(t, d) } func TestGetAppID(t *testing.T) { @@ -96,13 +96,13 @@ func TestDaprService(t *testing.T) { t.Run("invalid empty app id", func(t *testing.T) { d := getDeployment("", "true") err := getTestDaprHandler().ensureDaprServicePresent(context.TODO(), "default", d) - assert.Error(t, err) + require.Error(t, err) }) t.Run("invalid char app id", func(t *testing.T) { d := getDeployment("myapp@", "true") err := getTestDaprHandler().ensureDaprServicePresent(context.TODO(), "default", d) - assert.Error(t, err) + require.Error(t, err) }) } @@ -138,7 +138,7 @@ func TestPatchDaprService(t *testing.T) { s := runtime.NewScheme() err := scheme.AddToScheme(s) - assert.NoError(t, err) + require.NoError(t, err) testDaprHandler.Scheme = s cli := fake.NewClientBuilder().WithScheme(s).Build() @@ -152,10 +152,10 @@ func TestPatchDaprService(t *testing.T) { deployment := getDeployment("test", "true") err = testDaprHandler.createDaprService(ctx, myDaprService, deployment) - assert.NoError(t, err) + require.NoError(t, err) var actualService corev1.Service err = cli.Get(ctx, myDaprService, &actualService) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", actualService.ObjectMeta.Annotations[annotations.KeyAppID]) assert.Equal(t, "true", actualService.ObjectMeta.Annotations["prometheus.io/scrape"]) assert.Equal(t, "/", actualService.ObjectMeta.Annotations["prometheus.io/path"]) @@ -164,9 +164,9 @@ func TestPatchDaprService(t *testing.T) { assert.Equal(t, "app", actualService.OwnerReferences[0].Name) err = testDaprHandler.patchDaprService(ctx, myDaprService, deployment, actualService) - assert.NoError(t, err) + require.NoError(t, err) err = cli.Get(ctx, myDaprService, &actualService) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", actualService.ObjectMeta.Annotations[annotations.KeyAppID]) assert.Equal(t, "true", actualService.ObjectMeta.Annotations["prometheus.io/scrape"]) assert.Equal(t, "/", actualService.ObjectMeta.Annotations["prometheus.io/path"]) @@ -251,9 +251,9 @@ func TestInit(t *testing.T) { err := handler.Init(context.Background()) - assert.Nil(t, err) + require.NoError(t, err) - assert.Equal(t, 3, len(mgr.GetRunnables())) + assert.Len(t, mgr.GetRunnables(), 3) srv := &corev1.Service{} val := mgr.GetIndexerFunc(&corev1.Service{})(srv) diff --git a/pkg/placement/ha_test.go b/pkg/placement/ha_test.go index 36d04230c78..b469c53229b 100644 --- a/pkg/placement/ha_test.go +++ b/pkg/placement/ha_test.go @@ -132,7 +132,7 @@ func TestPlacementHA(t *testing.T) { // If leadership is lost, we should retry return false } - assert.NoError(t, err) + require.NoError(t, err) retrieveValidState(t, raftServers[findLeader(t, raftServers)], testMembers[1]) return true diff --git 
a/pkg/placement/hashing/consistent_hash_test.go b/pkg/placement/hashing/consistent_hash_test.go index 1a6b2ca9771..661e6597bab 100644 --- a/pkg/placement/hashing/consistent_hash_test.go +++ b/pkg/placement/hashing/consistent_hash_test.go @@ -14,10 +14,11 @@ limitations under the License. package hashing import ( - "fmt" + "strconv" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var nodes = []string{"node1", "node2", "node3", "node4", "node5"} @@ -25,7 +26,7 @@ var nodes = []string{"node1", "node2", "node3", "node4", "node5"} func TestReplicationFactor(t *testing.T) { keys := []string{} for i := 0; i < 100; i++ { - keys = append(keys, fmt.Sprint(i)) + keys = append(keys, strconv.Itoa(i)) } t.Run("varying replication factors, no movement", func(t *testing.T) { @@ -44,7 +45,7 @@ func TestReplicationFactor(t *testing.T) { for _, k := range keys { h, err := h.Get(k) - assert.NoError(t, err) + require.NoError(t, err) k1[k] = h } @@ -54,7 +55,7 @@ func TestReplicationFactor(t *testing.T) { for _, k := range keys { h, err := h.Get(k) - assert.NoError(t, err) + require.NoError(t, err) orgS := k1[k] if orgS != nodeToRemove { diff --git a/pkg/placement/membership.go b/pkg/placement/membership.go index 6697a01bd07..5fd7d2cba32 100644 --- a/pkg/placement/membership.go +++ b/pkg/placement/membership.go @@ -328,7 +328,7 @@ func (p *Service) performTableDissemination(ctx context.Context) error { state := p.raftNode.FSM().PlacementState() log.Infof( "Start disseminating tables. memberUpdateCount: %d, streams: %d, targets: %d, table generation: %s", - cnt, nStreamConnPool, nTargetConns, state.Version) + cnt, nStreamConnPool, nTargetConns, state.GetVersion()) p.streamConnPoolLock.RLock() streamConnPool := make([]placementGRPCStream, len(p.streamConnPool)) copy(streamConnPool, p.streamConnPool) @@ -338,7 +338,7 @@ func (p *Service) performTableDissemination(ctx context.Context) error { } log.Infof( "Completed dissemination. memberUpdateCount: %d, streams: %d, targets: %d, table generation: %s", - cnt, nStreamConnPool, nTargetConns, state.Version) + cnt, nStreamConnPool, nTargetConns, state.GetVersion()) p.memberUpdateCount.Store(0) // set faultyHostDetectDuration to the default duration. 
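Illustrative aside, not part of the patch: the getter conversions in the hunk above (state.Version to state.GetVersion()) and throughout this series rely on the fact that protoc-gen-go emits nil-safe getters, where a Get* method returns the field's zero value when the receiver or the field is nil, while direct field access on a nil message panics. A minimal hand-written sketch of that behavior follows; the Tables type is a hypothetical stand-in, not the generated placement proto.

// Sketch of the nil-safe getter pattern that the Get* conversions rely on.
// "Tables" is a hand-written stand-in, not the generated placement message.
package main

import "fmt"

type Tables struct {
	Version string
}

// GetVersion mirrors what protoc-gen-go generates: it tolerates a nil receiver.
func (t *Tables) GetVersion() string {
	if t == nil {
		return ""
	}
	return t.Version
}

func main() {
	var state *Tables               // a nil message, e.g. before any table exists
	fmt.Println(state.GetVersion()) // prints "" instead of panicking
	// fmt.Println(state.Version)   // direct access would panic on the nil pointer
}

Newer golangci-lint releases (including the v1.55 line this patch moves to) ship a protogetter linter that flags direct field access on generated messages, which appears to be what drives these mechanical rewrites.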
@@ -358,10 +358,10 @@ func (p *Service) performTablesUpdate(ctx context.Context, hosts []placementGRPC // Enforce maximum API level if newTable != nil { - if newTable.ApiLevel < p.minAPILevel { + if newTable.GetApiLevel() < p.minAPILevel { newTable.ApiLevel = p.minAPILevel } - if p.maxAPILevel != nil && newTable.ApiLevel > *p.maxAPILevel { + if p.maxAPILevel != nil && newTable.GetApiLevel() > *p.maxAPILevel { newTable.ApiLevel = *p.maxAPILevel } } diff --git a/pkg/placement/membership_test.go b/pkg/placement/membership_test.go index 84325ea16a3..e82ae683f0d 100644 --- a/pkg/placement/membership_test.go +++ b/pkg/placement/membership_test.go @@ -55,7 +55,7 @@ func TestMembershipChangeWorker(t *testing.T) { membershipStopCh := make(chan struct{}) cleanupStates() - assert.Equal(t, 0, len(testServer.raftNode.FSM().State().Members())) + assert.Empty(t, testServer.raftNode.FSM().State().Members()) go func() { defer close(membershipStopCh) @@ -105,7 +105,7 @@ func TestMembershipChangeWorker(t *testing.T) { for { placementOrder, streamErr := stream.Recv() require.NoError(t, streamErr) - if placementOrder.Operation == "unlock" { + if placementOrder.GetOperation() == "unlock" { done <- true return } @@ -121,7 +121,7 @@ func TestMembershipChangeWorker(t *testing.T) { } // act - assert.NoError(t, stream.Send(host)) + require.NoError(t, stream.Send(host)) select { case <-done: @@ -217,21 +217,21 @@ func TestPerformTableUpdate(t *testing.T) { } if placementOrder != nil { clientRecvDataLock.Lock() - clientRecvData[clientID][placementOrder.Operation] = clock.Now().UnixNano() + clientRecvData[clientID][placementOrder.GetOperation()] = clock.Now().UnixNano() clientRecvDataLock.Unlock() // Check if the table is up to date. - if placementOrder.Operation == "update" { - if placementOrder.Tables != nil { + if placementOrder.GetOperation() == "update" { + if placementOrder.GetTables() != nil { upToDate = true - for _, entries := range placementOrder.Tables.Entries { + for _, entries := range placementOrder.GetTables().GetEntries() { // Check if all clients are in load map. 
- if len(entries.LoadMap) != testClients { + if len(entries.GetLoadMap()) != testClients { upToDate = false } } } } - if placementOrder.Operation == "unlock" { + if placementOrder.GetOperation() == "unlock" { if upToDate { clientUpToDateCh <- struct{}{} return @@ -253,7 +253,7 @@ func TestPerformTableUpdate(t *testing.T) { } // act - assert.NoError(t, clientStreams[i].Send(host)) + require.NoError(t, clientStreams[i].Send(host)) } // Wait until clientStreams[clientID].Recv() in client go routine received new table @@ -282,7 +282,7 @@ func TestPerformTableUpdate(t *testing.T) { testServer.streamConnPoolLock.RUnlock() ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - assert.NoError(t, testServer.performTablesUpdate(ctx, streamConnPool, nil)) + require.NoError(t, testServer.performTablesUpdate(ctx, streamConnPool, nil)) // assert for i := 0; i < testClients; i++ { @@ -347,19 +347,19 @@ func PerformTableUpdateCostTime(t *testing.T) (wastedTime int64) { return } if placementOrder != nil { - if placementOrder.Operation == "lock" { - if startFlag.Load() && placementOrder.Tables != nil && placementOrder.Tables.Version == "demo" { + if placementOrder.GetOperation() == "lock" { + if startFlag.Load() && placementOrder.GetTables() != nil && placementOrder.GetTables().GetVersion() == "demo" { start = time.Now() if clientID == 1 { t.Log("client 1 lock", start) } } } - if placementOrder.Operation == "update" { + if placementOrder.GetOperation() == "update" { continue } - if placementOrder.Operation == "unlock" { - if startFlag.Load() && placementOrder.Tables != nil && placementOrder.Tables.Version == "demo" { + if placementOrder.GetOperation() == "unlock" { + if startFlag.Load() && placementOrder.GetTables() != nil && placementOrder.GetTables().GetVersion() == "demo" { if clientID == 1 { t.Log("client 1 unlock", time.Now()) } diff --git a/pkg/placement/placement.go b/pkg/placement/placement.go index c41d2092e9c..b0dca8031fa 100644 --- a/pkg/placement/placement.go +++ b/pkg/placement/placement.go @@ -227,8 +227,8 @@ func (p *Service) ReportDaprStatus(stream placementv1pb.Placement_ReportDaprStat req, err := stream.Recv() switch err { case nil: - if clientID != nil && req.Id != clientID.AppID() { - return status.Errorf(codes.PermissionDenied, "client ID %s is not allowed", req.Id) + if clientID != nil && req.GetId() != clientID.AppID() { + return status.Errorf(codes.PermissionDenied, "client ID %s is not allowed", req.GetId()) } state := p.raftNode.FSM().State() @@ -243,11 +243,11 @@ func (p *Service) ReportDaprStatus(stream placementv1pb.Placement_ReportDaprStat if p.maxAPILevel != nil && clusterAPILevel > *p.maxAPILevel { clusterAPILevel = *p.maxAPILevel } - if req.ApiLevel < clusterAPILevel { - return status.Errorf(codes.FailedPrecondition, "The cluster's Actor API level is %d, which is higher than the reported API level %d", clusterAPILevel, req.ApiLevel) + if req.GetApiLevel() < clusterAPILevel { + return status.Errorf(codes.FailedPrecondition, "The cluster's Actor API level is %d, which is higher than the reported API level %d", clusterAPILevel, req.GetApiLevel()) } - registeredMemberID = req.Name + registeredMemberID = req.GetName() p.addStreamConn(stream) // We need to use a background context here so dissemination isn't tied to the context of this stream // TODO: If each sidecar can report table version, then placement @@ -260,7 +260,7 @@ func (p *Service) ReportDaprStatus(stream placementv1pb.Placement_ReportDaprStat } // Ensure that the incoming runtime is 
actor instance. - isActorRuntime = len(req.Entities) > 0 + isActorRuntime = len(req.GetEntities()) > 0 if !isActorRuntime { // ignore if this runtime is non-actor. continue @@ -268,22 +268,22 @@ func (p *Service) ReportDaprStatus(stream placementv1pb.Placement_ReportDaprStat now := p.clock.Now() - for _, entity := range req.Entities { - monitoring.RecordActorHeartbeat(req.Id, entity, req.Name, req.Pod, now) + for _, entity := range req.GetEntities() { + monitoring.RecordActorHeartbeat(req.GetId(), entity, req.GetName(), req.GetPod(), now) } // Record the heartbeat timestamp. This timestamp will be used to check if the member // state maintained by raft is valid or not. If the member is outdated based the timestamp // the member will be marked as faulty node and removed. - p.lastHeartBeat.Store(req.Name, now.UnixNano()) + p.lastHeartBeat.Store(req.GetName(), now.UnixNano()) members := state.Members() // Upsert incoming member only if it is an actor service (not actor client) and // the existing member info is unmatched with the incoming member info. upsertRequired := true - if m, ok := members[req.Name]; ok { - if m.AppID == req.Id && m.Name == req.Name && cmp.Equal(m.Entities, req.Entities) { + if m, ok := members[req.GetName()]; ok { + if m.AppID == req.GetId() && m.Name == req.GetName() && cmp.Equal(m.Entities, req.GetEntities()) { upsertRequired = false } } @@ -292,14 +292,14 @@ func (p *Service) ReportDaprStatus(stream placementv1pb.Placement_ReportDaprStat p.membershipCh <- hostMemberChange{ cmdType: raft.MemberUpsert, host: raft.DaprHostMember{ - Name: req.Name, - AppID: req.Id, - Entities: req.Entities, + Name: req.GetName(), + AppID: req.GetId(), + Entities: req.GetEntities(), UpdatedAt: now.UnixNano(), - APILevel: req.ApiLevel, + APILevel: req.GetApiLevel(), }, } - log.Debugf("Member changed upserting appid %s with entities %v", req.Id, req.Entities) + log.Debugf("Member changed upserting appid %s with entities %v", req.GetId(), req.GetEntities()) } default: diff --git a/pkg/placement/placement_test.go b/pkg/placement/placement_test.go index 7f77b87f2a0..f450913d673 100644 --- a/pkg/placement/placement_test.go +++ b/pkg/placement/placement_test.go @@ -155,10 +155,10 @@ func TestMemberRegistration_Leadership(t *testing.T) { select { case memberChange := <-testServer.membershipCh: assert.Equal(t, raft.MemberUpsert, memberChange.cmdType) - assert.Equal(t, host.Name, memberChange.host.Name) - assert.Equal(t, host.Id, memberChange.host.AppID) - assert.EqualValues(t, host.Entities, memberChange.host.Entities) - assert.Equal(t, 1, len(testServer.streamConnPool)) + assert.Equal(t, host.GetName(), memberChange.host.Name) + assert.Equal(t, host.GetId(), memberChange.host.AppID) + assert.EqualValues(t, host.GetEntities(), memberChange.host.Entities) + assert.Len(t, testServer.streamConnPool, 1) return true default: return false @@ -174,7 +174,7 @@ func TestMemberRegistration_Leadership(t *testing.T) { select { case memberChange := <-testServer.membershipCh: assert.Equal(t, raft.MemberRemove, memberChange.cmdType) - assert.Equal(t, host.Name, memberChange.host.Name) + assert.Equal(t, host.GetName(), memberChange.host.Name) case <-time.After(testStreamSendLatency): require.Fail(t, "no membership change") @@ -203,9 +203,9 @@ func TestMemberRegistration_Leadership(t *testing.T) { select { case memberChange := <-testServer.membershipCh: assert.Equal(t, raft.MemberUpsert, memberChange.cmdType) - assert.Equal(t, host.Name, memberChange.host.Name) - assert.Equal(t, host.Id, memberChange.host.AppID) 
- assert.EqualValues(t, host.Entities, memberChange.host.Entities) + assert.Equal(t, host.GetName(), memberChange.host.Name) + assert.Equal(t, host.GetId(), memberChange.host.AppID) + assert.EqualValues(t, host.GetEntities(), memberChange.host.Entities) testServer.streamConnPoolLock.Lock() l := len(testServer.streamConnPool) testServer.streamConnPoolLock.Unlock() @@ -261,6 +261,6 @@ func TestMemberRegistration_Leadership(t *testing.T) { // act // Close tcp connection before closing stream, which simulates the scenario // where dapr runtime disconnects the connection from placement service unexpectedly. - assert.NoError(t, conn.Close()) + require.NoError(t, conn.Close()) }) } diff --git a/pkg/placement/raft/fsm.go b/pkg/placement/raft/fsm.go index ae5ad776e3d..a5de50f44d0 100644 --- a/pkg/placement/raft/fsm.go +++ b/pkg/placement/raft/fsm.go @@ -95,7 +95,7 @@ func (c *FSM) PlacementState() *v1pb.PlacementTables { table.Hosts[lk] = lv } - copy(table.SortedSet, sortedSet) + copy(table.GetSortedSet(), sortedSet) for lk, lv := range loadMap { h := v1pb.Host{ @@ -110,12 +110,12 @@ func (c *FSM) PlacementState() *v1pb.PlacementTables { newTable.Entries[k] = &table - totalHostSize += len(table.Hosts) - totalSortedSet += len(table.SortedSet) - totalLoadMap += len(table.LoadMap) + totalHostSize += len(table.GetHosts()) + totalSortedSet += len(table.GetSortedSet()) + totalLoadMap += len(table.GetLoadMap()) } - logging.Debugf("PlacementTable HostsCount=%d SortedSetCount=%d LoadMapCount=%d ApiLevel=%d", totalHostSize, totalSortedSet, totalLoadMap, newTable.ApiLevel) + logging.Debugf("PlacementTable HostsCount=%d SortedSetCount=%d LoadMapCount=%d ApiLevel=%d", totalHostSize, totalSortedSet, totalLoadMap, newTable.GetApiLevel()) return newTable } diff --git a/pkg/placement/raft/fsm_test.go b/pkg/placement/raft/fsm_test.go index 9858343572c..68f161f84b6 100644 --- a/pkg/placement/raft/fsm_test.go +++ b/pkg/placement/raft/fsm_test.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/raft" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFSMApply(t *testing.T) { @@ -32,7 +33,7 @@ func TestFSMApply(t *testing.T) { Entities: []string{"actorTypeOne", "actorTypeTwo"}, }) - assert.NoError(t, err) + require.NoError(t, err) raftLog := &raft.Log{ Index: 1, @@ -47,7 +48,7 @@ func TestFSMApply(t *testing.T) { assert.True(t, ok) assert.True(t, updated) assert.Equal(t, uint64(1), fsm.state.TableGeneration()) - assert.Equal(t, 1, len(fsm.state.Members())) + assert.Len(t, fsm.state.Members(), 1) }) t.Run("removeMember", func(t *testing.T) { @@ -55,7 +56,7 @@ func TestFSMApply(t *testing.T) { Name: "127.0.0.1:3030", }) - assert.NoError(t, err) + require.NoError(t, err) raftLog := &raft.Log{ Index: 2, @@ -70,7 +71,7 @@ func TestFSMApply(t *testing.T) { assert.True(t, ok) assert.True(t, updated) assert.Equal(t, uint64(2), fsm.state.TableGeneration()) - assert.Equal(t, 0, len(fsm.state.Members())) + assert.Empty(t, fsm.state.Members()) }) } @@ -86,15 +87,15 @@ func TestRestore(t *testing.T) { }) buf := bytes.NewBuffer(make([]byte, 0, 256)) err := s.persist(buf) - assert.NoError(t, err) + require.NoError(t, err) // act err = fsm.Restore(io.NopCloser(buf)) // assert - assert.NoError(t, err) - assert.Equal(t, 1, len(fsm.State().Members())) - assert.Equal(t, 2, len(fsm.State().hashingTableMap())) + require.NoError(t, err) + assert.Len(t, fsm.State().Members(), 1) + assert.Len(t, fsm.State().hashingTableMap(), 2) } func TestPlacementState(t *testing.T) { @@ -105,7 +106,7 @@ func 
TestPlacementState(t *testing.T) { Entities: []string{"actorTypeOne", "actorTypeTwo"}, } cmdLog, err := makeRaftLogCommand(MemberUpsert, m) - assert.NoError(t, err) + require.NoError(t, err) fsm.Apply(&raft.Log{ Index: 1, @@ -115,6 +116,6 @@ func TestPlacementState(t *testing.T) { }) newTable := fsm.PlacementState() - assert.Equal(t, "1", newTable.Version) - assert.Equal(t, 2, len(newTable.Entries)) + assert.Equal(t, "1", newTable.GetVersion()) + assert.Len(t, newTable.GetEntries(), 2) } diff --git a/pkg/placement/raft/snapshot_test.go b/pkg/placement/raft/snapshot_test.go index 1846fec7a8e..d44e4de4c53 100644 --- a/pkg/placement/raft/snapshot_test.go +++ b/pkg/placement/raft/snapshot_test.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/raft" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type MockSnapShotSink struct { @@ -60,13 +61,13 @@ func TestPersist(t *testing.T) { // act snap, err := fsm.Snapshot() - assert.NoError(t, err) + require.NoError(t, err) snap.Persist(fakeSink) // assert restoredState := newDaprHostMemberState() err = restoredState.restore(buf) - assert.NoError(t, err) + require.NoError(t, err) expectedMember := fsm.State().Members()[testMember.Name] restoredMember := restoredState.Members()[testMember.Name] diff --git a/pkg/placement/raft/state_test.go b/pkg/placement/raft/state_test.go index 76f46022832..13f39e98707 100644 --- a/pkg/placement/raft/state_test.go +++ b/pkg/placement/raft/state_test.go @@ -25,8 +25,8 @@ func TestNewDaprHostMemberState(t *testing.T) { // assert assert.Equal(t, uint64(0), s.Index()) - assert.Equal(t, 0, len(s.Members())) - assert.Equal(t, 0, len(s.hashingTableMap())) + assert.Empty(t, s.Members()) + assert.Empty(t, s.hashingTableMap()) } func TestClone(t *testing.T) { @@ -62,8 +62,8 @@ func TestUpsertMember(t *testing.T) { }) // assert - assert.Equal(t, 1, len(s.Members())) - assert.Equal(t, 2, len(s.hashingTableMap())) + assert.Len(t, s.Members(), 1) + assert.Len(t, s.hashingTableMap(), 2) assert.True(t, updated) }) @@ -77,8 +77,8 @@ func TestUpsertMember(t *testing.T) { }) // assert - assert.Equal(t, 2, len(s.Members())) - assert.Equal(t, 2, len(s.hashingTableMap())) + assert.Len(t, s.Members(), 2) + assert.Len(t, s.hashingTableMap(), 2) assert.True(t, updated) // act @@ -122,10 +122,10 @@ func TestUpsertMember(t *testing.T) { updated := s.upsertMember(testMember) // assert - assert.Equal(t, 2, len(s.Members())) + assert.Len(t, s.Members(), 2) assert.True(t, updated) - assert.Equal(t, 1, len(s.Members()[testMember.Name].Entities)) - assert.Equal(t, 3, len(s.hashingTableMap()), "this doesn't delete empty consistent hashing table") + assert.Len(t, s.Members()[testMember.Name].Entities, 1) + assert.Len(t, s.hashingTableMap(), 3, "this doesn't delete empty consistent hashing table") }) } @@ -142,9 +142,9 @@ func TestRemoveMember(t *testing.T) { }) // assert - assert.Equal(t, 1, len(s.Members())) + assert.Len(t, s.Members(), 1) assert.True(t, updated) - assert.Equal(t, 2, len(s.hashingTableMap())) + assert.Len(t, s.hashingTableMap(), 2) // act updated = s.removeMember(&DaprHostMember{ @@ -152,9 +152,9 @@ func TestRemoveMember(t *testing.T) { }) // assert - assert.Equal(t, 0, len(s.Members())) + assert.Empty(t, s.Members()) assert.True(t, updated) - assert.Equal(t, 0, len(s.hashingTableMap())) + assert.Empty(t, s.hashingTableMap()) }) t.Run("no table update required", func(t *testing.T) { @@ -164,9 +164,9 @@ func TestRemoveMember(t *testing.T) { }) // assert - assert.Equal(t, 0, len(s.Members())) + 
assert.Empty(t, s.Members()) assert.False(t, updated) - assert.Equal(t, 0, len(s.hashingTableMap())) + assert.Empty(t, s.hashingTableMap()) }) } @@ -186,7 +186,7 @@ func TestUpdateHashingTable(t *testing.T) { // act s.updateHashingTables(testMember) - assert.Equal(t, 2, len(s.hashingTableMap())) + assert.Len(t, s.hashingTableMap(), 2) for _, ent := range testMember.Entities { assert.NotNil(t, s.hashingTableMap()[ent]) } @@ -202,7 +202,7 @@ func TestUpdateHashingTable(t *testing.T) { // act s.updateHashingTables(testMember) - assert.Equal(t, 3, len(s.hashingTableMap())) + assert.Len(t, s.hashingTableMap(), 3) for _, ent := range testMember.Entities { assert.NotNil(t, s.hashingTableMap()[ent]) } @@ -239,7 +239,7 @@ func TestRemoveHashingTable(t *testing.T) { testMember.Name = tc.name s.removeHashingTables(testMember) - assert.Equal(t, tc.totalTable, len(s.hashingTableMap())) + assert.Len(t, s.hashingTableMap(), tc.totalTable) }) } } @@ -267,11 +267,11 @@ func TestRestoreHashingTables(t *testing.T) { } s.lock.Unlock() } - assert.Equal(t, 0, len(s.hashingTableMap())) + assert.Empty(t, s.hashingTableMap()) // act s.restoreHashingTables() // assert - assert.Equal(t, 2, len(s.hashingTableMap())) + assert.Len(t, s.hashingTableMap(), 2) } diff --git a/pkg/placement/raft/util_test.go b/pkg/placement/raft/util_test.go index 2b3cae60237..2eb379160c5 100644 --- a/pkg/placement/raft/util_test.go +++ b/pkg/placement/raft/util_test.go @@ -19,34 +19,35 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestEnsureDir(t *testing.T) { testDir := "_testDir" t.Run("create dir successfully", func(t *testing.T) { err := ensureDir(testDir) - assert.NoError(t, err) + require.NoError(t, err) err = os.Remove(testDir) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("ensure the existing directory", func(t *testing.T) { err := os.Mkdir(testDir, 0o700) - assert.NoError(t, err) + require.NoError(t, err) err = ensureDir(testDir) - assert.NoError(t, err) + require.NoError(t, err) err = os.Remove(testDir) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("fails to create dir", func(t *testing.T) { file, err := os.Create(testDir) - assert.NoError(t, err) + require.NoError(t, err) file.Close() err = ensureDir(testDir) - assert.Error(t, err) + require.Error(t, err) err = os.Remove(testDir) - assert.NoError(t, err) + require.NoError(t, err) }) } @@ -95,11 +96,11 @@ func TestMarshalAndUnmarshalMsgpack(t *testing.T) { } encoded, err := marshalMsgPack(testObject) - assert.NoError(t, err) + require.NoError(t, err) var decoded testStruct err = unmarshalMsgPack(encoded, &decoded) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, testObject.Name, decoded.Name) assert.Equal(t, testObject.StringArrayList, decoded.StringArrayList) diff --git a/pkg/resiliency/breaker/circuitbreaker_test.go b/pkg/resiliency/breaker/circuitbreaker_test.go index 60202544e31..c5f863c4dba 100644 --- a/pkg/resiliency/breaker/circuitbreaker_test.go +++ b/pkg/resiliency/breaker/circuitbreaker_test.go @@ -53,7 +53,7 @@ func TestCircuitBreaker(t *testing.T) { return "❌", nil }) assert.Equal(t, breaker.StateOpen, cb.State()) - assert.EqualError(t, err, "circuit breaker is open") + require.EqualError(t, err, "circuit breaker is open") assert.Nil(t, res) time.Sleep(500 * time.Millisecond) @@ -61,6 +61,6 @@ func TestCircuitBreaker(t *testing.T) { res, err = cb.Execute(func() (any, error) { return 42, nil }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 42, 
res) } diff --git a/pkg/resiliency/policy_test.go b/pkg/resiliency/policy_test.go index b4a6e7308f1..6a765796961 100644 --- a/pkg/resiliency/policy_test.go +++ b/pkg/resiliency/policy_test.go @@ -23,6 +23,7 @@ import ( "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/exp/slices" "github.com/dapr/dapr/pkg/resiliency/breaker" @@ -309,9 +310,9 @@ func TestPolicyAccumulator(t *testing.T) { // Sleep a bit to ensure that things in the background aren't continuing time.Sleep(100 * time.Millisecond) - assert.NoError(t, err) + require.NoError(t, err) // res should contain only the last result, i.e. 4 - assert.Equal(t, res, int32(4)) + assert.Equal(t, int32(4), res) assert.Equal(t, 3, accumulatorCalled) assert.Equal(t, int32(5), fnCalled.Load()) assert.Equal(t, []int32{1, 3, 4}, received) @@ -345,9 +346,9 @@ func TestPolicyDisposer(t *testing.T) { }) res, err := policy(fn) - assert.NoError(t, err) + require.NoError(t, err) // res should contain only the last result, i.e. 4 - assert.Equal(t, res, int32(4)) + assert.Equal(t, int32(4), res) // The disposer should be 3 times called with values 1, 2, 3 disposed := []int32{} diff --git a/pkg/resiliency/resiliency_test.go b/pkg/resiliency/resiliency_test.go index 4a3f87ef238..1266797f022 100644 --- a/pkg/resiliency/resiliency_test.go +++ b/pkg/resiliency/resiliency_test.go @@ -216,7 +216,7 @@ func TestPoliciesForTargets(t *testing.T) { called.Store(true) return nil, nil }) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, called.Load()) }) } @@ -225,7 +225,7 @@ func TestPoliciesForTargets(t *testing.T) { func TestLoadKubernetesResiliency(t *testing.T) { port, _ := freeport.GetFreePort() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - assert.NoError(t, err) + require.NoError(t, err) s := grpc.NewServer() operatorv1pb.RegisterOperatorServer(s, &mockOperator{}) @@ -259,7 +259,7 @@ func TestLoadStandaloneResiliency(t *testing.T) { t.Run("test load resiliency skips other types", func(t *testing.T) { configs := LoadLocalResiliency(log, "app1", "../components") assert.NotNil(t, configs) - assert.Len(t, configs, 0) + assert.Empty(t, configs) }) } @@ -290,10 +290,10 @@ func TestParseActorCircuitBreakerScope(t *testing.T) { t.Run(tt.input, func(t *testing.T) { actual, err := ParseActorCircuitBreakerScope(tt.input) if tt.err == "" { - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, tt.output, actual) } else { - assert.EqualError(t, err, tt.err) + require.EqualError(t, err, tt.err) } }) } @@ -306,7 +306,7 @@ func TestParseMaxRetries(t *testing.T) { require.NotNil(t, configs[0]) r := FromConfigurations(log, configs[0]) - require.True(t, len(r.retries) > 0) + require.NotEmpty(t, r.retries) require.NotNil(t, r.retries["noRetry"]) require.NotNil(t, r.retries["retryForever"]) require.NotNil(t, r.retries["missingMaxRetries"]) @@ -325,7 +325,7 @@ func TestParseMaxRetries(t *testing.T) { func TestResiliencyScopeIsRespected(t *testing.T) { port, _ := freeport.GetFreePort() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - assert.NoError(t, err) + require.NoError(t, err) s := grpc.NewServer() operatorv1pb.RegisterOperatorServer(s, &mockOperator{}) diff --git a/pkg/responsewriter/response_writer_test.go b/pkg/responsewriter/response_writer_test.go index a74710ce49d..08e3da3454b 100644 --- a/pkg/responsewriter/response_writer_test.go +++ b/pkg/responsewriter/response_writer_test.go @@ -26,8 +26,8 @@ func TestResponseWriterBeforeWrite(t *testing.T) { rec := 
httptest.NewRecorder() rw := NewResponseWriter(rec) - require.Equal(t, rw.Status(), 0) - require.Equal(t, rw.Written(), false) + require.Equal(t, 0, rw.Status()) + require.False(t, rw.Written()) } func TestResponseWriterBeforeFuncHasAccessToStatus(t *testing.T) { @@ -41,7 +41,7 @@ func TestResponseWriterBeforeFuncHasAccessToStatus(t *testing.T) { }) rw.WriteHeader(http.StatusCreated) - require.Equal(t, status, http.StatusCreated) + require.Equal(t, http.StatusCreated, status) } func TestResponseWriterBeforeFuncCanChangeStatus(t *testing.T) { @@ -54,7 +54,7 @@ func TestResponseWriterBeforeFuncCanChangeStatus(t *testing.T) { }) rw.WriteHeader(http.StatusBadRequest) - require.Equal(t, rec.Code, http.StatusOK) + require.Equal(t, http.StatusOK, rec.Code) } func TestResponseWriterBeforeFuncChangesStatusMultipleTimes(t *testing.T) { @@ -69,7 +69,7 @@ func TestResponseWriterBeforeFuncChangesStatusMultipleTimes(t *testing.T) { }) rw.WriteHeader(http.StatusOK) - require.Equal(t, rec.Code, http.StatusNotFound) + require.Equal(t, http.StatusNotFound, rec.Code) } func TestResponseWriterWritingString(t *testing.T) { @@ -79,10 +79,10 @@ func TestResponseWriterWritingString(t *testing.T) { rw.Write([]byte("Hello world")) require.Equal(t, rec.Code, rw.Status()) - require.Equal(t, rec.Body.String(), "Hello world") - require.Equal(t, rw.Status(), http.StatusOK) - require.Equal(t, rw.Size(), 11) - require.Equal(t, rw.Written(), true) + require.Equal(t, "Hello world", rec.Body.String()) + require.Equal(t, http.StatusOK, rw.Status()) + require.Equal(t, 11, rw.Size()) + require.True(t, rw.Written()) } func TestResponseWriterWritingStrings(t *testing.T) { @@ -93,9 +93,9 @@ func TestResponseWriterWritingStrings(t *testing.T) { rw.Write([]byte("foo bar bat baz")) require.Equal(t, rec.Code, rw.Status()) - require.Equal(t, rec.Body.String(), "Hello worldfoo bar bat baz") - require.Equal(t, rw.Status(), http.StatusOK) - require.Equal(t, rw.Size(), 26) + require.Equal(t, "Hello worldfoo bar bat baz", rec.Body.String()) + require.Equal(t, http.StatusOK, rw.Status()) + require.Equal(t, 26, rw.Size()) } func TestResponseWriterWritingHeader(t *testing.T) { @@ -105,9 +105,9 @@ func TestResponseWriterWritingHeader(t *testing.T) { rw.WriteHeader(http.StatusNotFound) require.Equal(t, rec.Code, rw.Status()) - require.Equal(t, rec.Body.String(), "") - require.Equal(t, rw.Status(), http.StatusNotFound) - require.Equal(t, rw.Size(), 0) + require.Equal(t, "", rec.Body.String()) + require.Equal(t, http.StatusNotFound, rw.Status()) + require.Equal(t, 0, rw.Size()) } func TestResponseWriterWritingHeaderTwice(t *testing.T) { @@ -117,10 +117,10 @@ func TestResponseWriterWritingHeaderTwice(t *testing.T) { rw.WriteHeader(http.StatusNotFound) rw.WriteHeader(http.StatusInternalServerError) - require.Equal(t, rec.Code, rw.Status()) - require.Equal(t, rec.Body.String(), "") - require.Equal(t, rw.Status(), http.StatusNotFound) - require.Equal(t, rw.Size(), 0) + require.Equal(t, rw.Status(), rec.Code) + require.Equal(t, "", rec.Body.String()) + require.Equal(t, http.StatusNotFound, rw.Status()) + require.Equal(t, 0, rw.Size()) } func TestResponseWriterBefore(t *testing.T) { @@ -138,10 +138,10 @@ func TestResponseWriterBefore(t *testing.T) { rw.WriteHeader(http.StatusNotFound) require.Equal(t, rec.Code, rw.Status()) - require.Equal(t, rec.Body.String(), "") - require.Equal(t, rw.Status(), http.StatusNotFound) - require.Equal(t, rw.Size(), 0) - require.Equal(t, result, "barfoo") + require.Equal(t, "", rec.Body.String()) + require.Equal(t, 
http.StatusNotFound, rw.Status()) + require.Equal(t, 0, rw.Size()) + require.Equal(t, "barfoo", result) } func TestResponseWriterUnwrap(t *testing.T) { @@ -177,12 +177,12 @@ func TestResponseWriterWithoutReadFrom(t *testing.T) { rw := NewResponseWriter(rec) n, err := io.Copy(rw, &mockReader{readStr: writeString}) - require.Equal(t, err, nil) - require.Equal(t, rw.Status(), http.StatusOK) - require.Equal(t, rw.Written(), true) - require.Equal(t, rw.Size(), len(writeString)) - require.Equal(t, int(n), len(writeString)) - require.Equal(t, rec.Body.String(), writeString) + require.NoError(t, err) + require.Equal(t, http.StatusOK, rw.Status()) + require.True(t, rw.Written()) + require.Len(t, writeString, rw.Size()) + require.Len(t, writeString, int(n)) + require.Equal(t, writeString, rec.Body.String()) } type mockResponseWriterWithReadFrom struct { @@ -205,11 +205,11 @@ func TestResponseWriterWithReadFrom(t *testing.T) { mrw := &mockResponseWriterWithReadFrom{ResponseRecorder: httptest.NewRecorder()} rw := NewResponseWriter(mrw) n, err := io.Copy(rw, &mockReader{readStr: writeString}) - require.Equal(t, err, nil) - require.Equal(t, rw.Status(), http.StatusOK) - require.Equal(t, rw.Written(), true) - require.Equal(t, rw.Size(), len(writeString)) - require.Equal(t, int(n), len(writeString)) - require.Equal(t, mrw.Body.String(), writeString) - require.Equal(t, mrw.writtenStr, writeString) + require.NoError(t, err) + require.Equal(t, http.StatusOK, rw.Status()) + require.True(t, rw.Written()) + require.Len(t, writeString, rw.Size()) + require.Len(t, writeString, int(n)) + require.Equal(t, writeString, mrw.Body.String()) + require.Equal(t, writeString, mrw.writtenStr) } diff --git a/pkg/runtime/channels/channels_test.go b/pkg/runtime/channels/channels_test.go index 88088a41765..f46c0e75a03 100644 --- a/pkg/runtime/channels/channels_test.go +++ b/pkg/runtime/channels/channels_test.go @@ -70,7 +70,7 @@ func TestMiddlewareBuildPipeline(t *testing.T) { }, }, "test") require.NoError(t, err) - assert.Len(t, pipeline.Handlers, 0) + assert.Empty(t, pipeline.Handlers) }) compStore := compstore.New() @@ -232,7 +232,7 @@ func TestGetAppHTTPChannelConfigWithCustomChannel(t *testing.T) { } p, err := ch.BuildHTTPPipeline(&config.PipelineSpec{}) - assert.Nil(t, err) + require.NoError(t, err) c := ch.appHTTPChannelConfig(p) assert.Equal(t, "http://my.app:0", c.Endpoint) @@ -285,7 +285,7 @@ func TestGetHTTPEndpointAppChannel(t *testing.T) { Spec: httpendpapi.HTTPEndpointSpec{}, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, conf.Client.Transport.(*http.Transport).TLSClientConfig) }) @@ -366,7 +366,7 @@ func TestGetHTTPEndpointAppChannel(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, conf.Client.Transport.(*http.Transport).TLSClientConfig) }) @@ -409,6 +409,6 @@ func TestGetHTTPEndpointAppChannel(t *testing.T) { }, }) - assert.Error(t, err) + require.Error(t, err) }) } diff --git a/pkg/runtime/config_test.go b/pkg/runtime/config_test.go index 56ff7722df6..af56a77252f 100644 --- a/pkg/runtime/config_test.go +++ b/pkg/runtime/config_test.go @@ -73,23 +73,23 @@ func Test_toInternal(t *testing.T) { assert.Equal(t, "1.2.3.4", intc.apiListenAddresses[0]) assert.Equal(t, 8080, intc.appConnectionConfig.Port) assert.Equal(t, 7070, intc.profilePort) - assert.Equal(t, true, intc.enableProfiling) + assert.True(t, intc.enableProfiling) assert.Equal(t, 1, intc.appConnectionConfig.MaxConcurrency) - assert.Equal(t, true, intc.mTLSEnabled) + assert.True(t, 
intc.mTLSEnabled) assert.Equal(t, "localhost:5052", intc.sentryServiceAddress) assert.Equal(t, 4, intc.maxRequestBodySize) assert.Equal(t, "", intc.unixDomainSocket) assert.Equal(t, 4, intc.readBufferSize) assert.Equal(t, time.Second, intc.gracefulShutdownDuration) assert.Equal(t, ptr.Of(true), intc.enableAPILogging) - assert.Equal(t, true, intc.disableBuiltinK8sSecretStore) + assert.True(t, intc.disableBuiltinK8sSecretStore) assert.Equal(t, "1.1.1.1", intc.appConnectionConfig.ChannelAddress) } func TestStandaloneWasmStrictSandbox(t *testing.T) { global, err := config.LoadStandaloneConfiguration("../config/testdata/wasm_strict_sandbox.yaml") - assert.Nil(t, err) + require.NoError(t, err) assert.True(t, global.Spec.WasmSpec.StrictSandbox) } diff --git a/pkg/runtime/meta/meta_test.go b/pkg/runtime/meta/meta_test.go index ac0cd85322d..8cb400b1c54 100644 --- a/pkg/runtime/meta/meta_test.go +++ b/pkg/runtime/meta/meta_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "github.com/dapr/dapr/pkg/apis/common" @@ -38,8 +39,8 @@ func TestMetadataItemsToPropertiesConversion(t *testing.T) { }, } m, err := meta.convertItemsToProps(items) - assert.NoError(t, err) - assert.Equal(t, 1, len(m)) + require.NoError(t, err) + assert.Len(t, m, 1) assert.Equal(t, "b", m["a"]) }) @@ -54,8 +55,8 @@ func TestMetadataItemsToPropertiesConversion(t *testing.T) { }, } m, err := meta.convertItemsToProps(items) - assert.NoError(t, err) - assert.Equal(t, 1, len(m)) + require.NoError(t, err) + assert.Len(t, m, 1) assert.Equal(t, "6", m["a"]) }) @@ -70,8 +71,8 @@ func TestMetadataItemsToPropertiesConversion(t *testing.T) { }, } m, err := meta.convertItemsToProps(items) - assert.NoError(t, err) - assert.Equal(t, 1, len(m)) + require.NoError(t, err) + assert.Len(t, m, 1) assert.Equal(t, "true", m["a"]) }) @@ -86,8 +87,8 @@ func TestMetadataItemsToPropertiesConversion(t *testing.T) { }, } m, err := meta.convertItemsToProps(items) - assert.NoError(t, err) - assert.Equal(t, 1, len(m)) + require.NoError(t, err) + assert.Len(t, m, 1) assert.Equal(t, "5.5", m["a"]) }) @@ -102,8 +103,8 @@ func TestMetadataItemsToPropertiesConversion(t *testing.T) { }, } m, err := meta.convertItemsToProps(items) - assert.NoError(t, err) - assert.Equal(t, 1, len(m)) + require.NoError(t, err) + assert.Len(t, m, 1) assert.Equal(t, "hello there", m["a"]) }) } @@ -157,7 +158,7 @@ func TestMetadataOverrideWasmStrictSandbox(t *testing.T) { // check that WasmStrictSandbox is set to true base, err := meta.ToBaseMetadata(com) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "true", base.Properties[WasmStrictSandboxMetadataKey]) }) @@ -183,7 +184,7 @@ func TestMetadataOverrideWasmStrictSandbox(t *testing.T) { // check that WasmStrictSandbox is set to true base, err := meta.ToBaseMetadata(com) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "true", base.Properties[WasmStrictSandboxMetadataKey]) }) @@ -217,8 +218,8 @@ func TestMetadataOverrideWasmStrictSandbox(t *testing.T) { wasm, err := meta.ToBaseMetadata(com) noneWasm, err2 := meta.ToBaseMetadata(noneWasmComp) - assert.NoError(t, err) - assert.NoError(t, err2) + require.NoError(t, err) + require.NoError(t, err2) assert.Equal(t, "true", wasm.Properties[WasmStrictSandboxMetadataKey]) assert.Equal(t, "", noneWasm.Properties[WasmStrictSandboxMetadataKey]) }) diff --git a/pkg/runtime/processor/binding/init_test.go 
b/pkg/runtime/processor/binding/init_test.go index 4b638d5e7ed..2c685c4aab0 100644 --- a/pkg/runtime/processor/binding/init_test.go +++ b/pkg/runtime/processor/binding/init_test.go @@ -17,7 +17,7 @@ import ( "context" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/dapr/components-contrib/bindings" compapi "github.com/dapr/dapr/pkg/apis/components/v1alpha1" @@ -50,7 +50,7 @@ func TestInitBindings(t *testing.T) { c.ObjectMeta.Name = "testInputBinding" c.Spec.Type = "bindings.testInputBinding" err := proc.Init(context.TODO(), c) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("single output binding", func(t *testing.T) { @@ -72,7 +72,7 @@ func TestInitBindings(t *testing.T) { c.ObjectMeta.Name = "testOutputBinding" c.Spec.Type = "bindings.testOutputBinding" err := proc.Init(context.TODO(), c) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("one input binding, one output binding", func(t *testing.T) { @@ -101,13 +101,13 @@ func TestInitBindings(t *testing.T) { input.ObjectMeta.Name = "testinput" input.Spec.Type = "bindings.testinput" err := proc.Init(context.TODO(), input) - assert.NoError(t, err) + require.NoError(t, err) output := compapi.Component{} output.ObjectMeta.Name = "testoutput" output.Spec.Type = "bindings.testoutput" err = proc.Init(context.TODO(), output) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("one not exist binding", func(t *testing.T) { @@ -125,6 +125,6 @@ func TestInitBindings(t *testing.T) { c.ObjectMeta.Name = "testNotExistBinding" c.Spec.Type = "bindings.testNotExistBinding" err := proc.Init(context.TODO(), c) - assert.Error(t, err) + require.Error(t, err) }) } diff --git a/pkg/runtime/processor/binding/send.go b/pkg/runtime/processor/binding/send.go index 1a04876f116..30b6ea24cee 100644 --- a/pkg/runtime/processor/binding/send.go +++ b/pkg/runtime/processor/binding/send.go @@ -293,19 +293,19 @@ func (b *binding) sendBindingEventToApp(ctx context.Context, bindingName string, return nil, fmt.Errorf("error invoking app: %w", err) } if resp != nil { - if resp.Concurrency == runtimev1pb.BindingEventResponse_PARALLEL { //nolint:nosnakecase + if resp.GetConcurrency() == runtimev1pb.BindingEventResponse_PARALLEL { //nolint:nosnakecase response.Concurrency = ConcurrencyParallel } else { response.Concurrency = ConcurrencySequential } - response.To = resp.To + response.To = resp.GetTo() - if resp.Data != nil { - appResponseBody = resp.Data + if resp.GetData() != nil { + appResponseBody = resp.GetData() var d interface{} - err := json.Unmarshal(resp.Data, &d) + err := json.Unmarshal(resp.GetData(), &d) if err == nil { response.Data = d } @@ -339,8 +339,8 @@ func (b *binding) sendBindingEventToApp(ctx context.Context, bindingName string, if rErr != nil { return rResp, rErr } - if rResp != nil && rResp.Status().Code != http.StatusOK { - return rResp, fmt.Errorf("%w, status %d", respErr, rResp.Status().Code) + if rResp != nil && rResp.Status().GetCode() != http.StatusOK { + return rResp, fmt.Errorf("%w, status %d", respErr, rResp.Status().GetCode()) } return rResp, nil }) @@ -359,15 +359,15 @@ func (b *binding) sendBindingEventToApp(ctx context.Context, bindingName string, http.MethodPost+" /"+bindingName, ) diag.AddAttributesToSpan(span, m) - diag.UpdateSpanStatusFromHTTPStatus(span, int(resp.Status().Code)) + diag.UpdateSpanStatusFromHTTPStatus(span, int(resp.Status().GetCode())) span.End() } appResponseBody, err = resp.RawDataFull() // ::TODO report metrics for http, such as grpc - if 
resp.Status().Code < 200 || resp.Status().Code > 299 { - return nil, fmt.Errorf("fails to send binding event to http app channel, status code: %d body: %s", resp.Status().Code, string(appResponseBody)) + if resp.Status().GetCode() < 200 || resp.Status().GetCode() > 299 { + return nil, fmt.Errorf("fails to send binding event to http app channel, status code: %d body: %s", resp.Status().GetCode(), string(appResponseBody)) } if err != nil { @@ -414,7 +414,7 @@ func (b *binding) getSubscribedBindingsGRPC(ctx context.Context) ([]string, erro bindings := []string{} if err == nil && resp != nil { - bindings = resp.Bindings + bindings = resp.GetBindings() } return bindings, nil } @@ -447,7 +447,7 @@ func (b *binding) isAppSubscribedToBinding(ctx context.Context, binding string) log.Fatalf("could not invoke OPTIONS method on input binding subscription endpoint %q: %v", path, err) } defer resp.Close() - code := resp.Status().Code + code := resp.Status().GetCode() return code/100 == 2 || code == http.StatusMethodNotAllowed, nil } diff --git a/pkg/runtime/processor/binding/send_test.go b/pkg/runtime/processor/binding/send_test.go index 771fa9df05d..64183defbc1 100644 --- a/pkg/runtime/processor/binding/send_test.go +++ b/pkg/runtime/processor/binding/send_test.go @@ -130,7 +130,7 @@ func TestStartReadingFromBindings(t *testing.T) { b.compStore.AddInputBinding("test", m) err := b.StartReadingFromBindings(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything)) }) @@ -172,7 +172,7 @@ func TestStartReadingFromBindings(t *testing.T) { require.NoError(t, b.compStore.CommitPendingComponent()) err := b.StartReadingFromBindings(context.Background()) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, mockAppChannel.AssertNotCalled(t, "InvokeMethod", mock.Anything, mock.Anything)) }) } @@ -418,7 +418,7 @@ func TestInvokeOutputBindings(t *testing.T) { _, err := b.SendToOutputBinding(context.Background(), "mockBinding", &bindings.InvokeRequest{ Data: []byte(""), }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, "operation field is missing from request", err.Error()) }) @@ -438,7 +438,7 @@ func TestInvokeOutputBindings(t *testing.T) { Data: []byte(""), Operation: bindings.CreateOperation, }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("output binding invalid operation", func(t *testing.T) { @@ -457,7 +457,7 @@ func TestInvokeOutputBindings(t *testing.T) { Data: []byte(""), Operation: bindings.GetOperation, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, "binding mockBinding does not support operation get. 
supported operations:create list", err.Error()) }) } @@ -476,12 +476,12 @@ func TestBindingTracingHttp(t *testing.T) { b.channels = new(channels.Channels).WithAppChannel(mockAppChannel) _, err := b.sendBindingEventToApp(context.Background(), "mockBinding", []byte(""), map[string]string{"traceparent": "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01"}) - assert.NoError(t, err) + require.NoError(t, err) mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything) assert.Len(t, mockAppChannel.Calls, 1) req := mockAppChannel.Calls[0].Arguments.Get(1).(*invokev1.InvokeMethodRequest) assert.Contains(t, req.Metadata(), "traceparent") - assert.Contains(t, req.Metadata()["traceparent"].Values, "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01") + assert.Contains(t, req.Metadata()["traceparent"].GetValues(), "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01") }) t.Run("traceparent passed through with response status code 204", func(t *testing.T) { @@ -490,12 +490,12 @@ func TestBindingTracingHttp(t *testing.T) { b.channels = new(channels.Channels).WithAppChannel(mockAppChannel) _, err := b.sendBindingEventToApp(context.Background(), "mockBinding", []byte(""), map[string]string{"traceparent": "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01"}) - assert.NoError(t, err) + require.NoError(t, err) mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything) assert.Len(t, mockAppChannel.Calls, 1) req := mockAppChannel.Calls[0].Arguments.Get(1).(*invokev1.InvokeMethodRequest) assert.Contains(t, req.Metadata(), "traceparent") - assert.Contains(t, req.Metadata()["traceparent"].Values, "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01") + assert.Contains(t, req.Metadata()["traceparent"].GetValues(), "00-d97eeaf10b4d00dc6ba794f3a41c5268-09462d216dd14deb-01") }) t.Run("bad traceparent does not fail request", func(t *testing.T) { @@ -504,7 +504,7 @@ func TestBindingTracingHttp(t *testing.T) { b.channels = new(channels.Channels).WithAppChannel(mockAppChannel) _, err := b.sendBindingEventToApp(context.Background(), "mockBinding", []byte(""), map[string]string{"traceparent": "I am not a traceparent"}) - assert.NoError(t, err) + require.NoError(t, err) mockAppChannel.AssertCalled(t, "InvokeMethod", mock.Anything, mock.Anything) assert.Len(t, mockAppChannel.Calls, 1) }) @@ -560,7 +560,7 @@ func TestBindingResiliency(t *testing.T) { output.ObjectMeta.Name = "failOutput" output.Spec.Type = "bindings.failingoutput" err := b.Init(context.TODO(), output) - assert.NoError(t, err) + require.NoError(t, err) t.Run("output binding retries on failure with resiliency", func(t *testing.T) { req := &bindings.InvokeRequest{ @@ -569,7 +569,7 @@ func TestBindingResiliency(t *testing.T) { } _, err := b.SendToOutputBinding(context.Background(), "failOutput", req) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingBinding.Failure.CallCount("outputFailingKey")) }) @@ -582,7 +582,7 @@ func TestBindingResiliency(t *testing.T) { _, err := b.SendToOutputBinding(context.Background(), "failOutput", req) end := time.Now() - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, 2, failingBinding.Failure.CallCount("outputTimeoutKey")) assert.Less(t, end.Sub(start), time.Second*10) }) @@ -590,7 +590,7 @@ func TestBindingResiliency(t *testing.T) { t.Run("input binding retries on failure with resiliency", func(t *testing.T) { _, err := b.sendBindingEventToApp(context.Background(), "failingInputBinding", []byte("inputFailingKey"), 
map[string]string{}) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingChannel.Failure.CallCount("inputFailingKey")) }) @@ -599,7 +599,7 @@ func TestBindingResiliency(t *testing.T) { _, err := b.sendBindingEventToApp(context.Background(), "failingInputBinding", []byte("inputTimeoutKey"), map[string]string{}) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingChannel.Failure.CallCount("inputTimeoutKey")) assert.Less(t, end.Sub(start), time.Second*10) }) @@ -607,7 +607,7 @@ func TestBindingResiliency(t *testing.T) { func matchDaprRequestMethod(method string) any { return mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - if req == nil || req.Message() == nil || req.Message().Method != method { + if req == nil || req.Message() == nil || req.Message().GetMethod() != method { return false } return true diff --git a/pkg/runtime/processor/processor_test.go b/pkg/runtime/processor/processor_test.go index b07913c0aa6..10816c0026e 100644 --- a/pkg/runtime/processor/processor_test.go +++ b/pkg/runtime/processor/processor_test.go @@ -22,6 +22,7 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -82,7 +83,7 @@ func TestProcessComponentsAndDependents(t *testing.T) { t.Run("test incorrect type", func(t *testing.T) { err := proc.processComponentAndDependents(context.Background(), incorrectComponentType) - assert.Error(t, err, "expected an error") + require.Error(t, err, "expected an error") assert.Equal(t, "incorrect type pubsubs.mockPubSub", err.Error(), "expected error strings to match") }) } @@ -107,7 +108,7 @@ func TestInitSecretStores(t *testing.T) { Version: "v1", }, }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("secret store is registered", func(t *testing.T) { @@ -129,7 +130,7 @@ func TestInitSecretStores(t *testing.T) { Version: "v1", }, }) - assert.NoError(t, err) + require.NoError(t, err) store, ok := proc.compStore.GetSecretStore("kubernetesMock") assert.True(t, ok) assert.NotNil(t, store) @@ -239,15 +240,15 @@ func TestMetadataUUID(t *testing.T) { consumerID := metadata.Properties["consumerID"] var uuid0, uuid1, uuid2 uuid.UUID uuid0, err := uuid.Parse(consumerID) - assert.NoError(t, err) + require.NoError(t, err) twoUUIDs := metadata.Properties["twoUUIDs"] uuids := strings.Split(twoUUIDs, " ") - assert.Equal(t, 2, len(uuids)) + assert.Len(t, uuids, 2) uuid1, err = uuid.Parse(uuids[0]) - assert.NoError(t, err) + require.NoError(t, err) uuid2, err = uuid.Parse(uuids[1]) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEqual(t, uuid0, uuid1) assert.NotEqual(t, uuid0, uuid2) @@ -255,7 +256,7 @@ func TestMetadataUUID(t *testing.T) { }) err := proc.processComponentAndDependents(context.Background(), pubsubComponent) - assert.NoError(t, err) + require.NoError(t, err) } func TestMetadataPodName(t *testing.T) { @@ -300,7 +301,7 @@ func TestMetadataPodName(t *testing.T) { }) err := proc.processComponentAndDependents(context.Background(), pubsubComponent) - assert.NoError(t, err) + require.NoError(t, err) } func TestMetadataNamespace(t *testing.T) { @@ -346,7 +347,7 @@ func TestMetadataNamespace(t *testing.T) { }) err := proc.processComponentAndDependents(context.Background(), pubsubComponent) - assert.NoError(t, err) + require.NoError(t, err) } func TestMetadataClientID(t *testing.T) { @@ 
-394,7 +395,7 @@ func TestMetadataClientID(t *testing.T) { }) err := proc.processComponentAndDependents(context.Background(), pubsubComponent) - assert.NoError(t, err) + require.NoError(t, err) select { case clientID := <-clientIDChan: @@ -436,9 +437,9 @@ func TestMetadataClientID(t *testing.T) { }) err := proc.processComponentAndDependents(context.Background(), pubsubComponent) - assert.NoError(t, err) + require.NoError(t, err) appIds := strings.Split(standAloneClientID, " ") - assert.Equal(t, 2, len(appIds)) + assert.Len(t, appIds, 2) for _, appID := range appIds { assert.Equal(t, daprt.TestRuntimeConfigID, appID) } diff --git a/pkg/runtime/processor/pubsub/bulk_subscriber.go b/pkg/runtime/processor/pubsub/bulk_subscriber.go index d6f482e1f4e..503ceb93d6f 100644 --- a/pkg/runtime/processor/pubsub/bulk_subscriber.go +++ b/pkg/runtime/processor/pubsub/bulk_subscriber.go @@ -190,7 +190,7 @@ func (p *pubsub) bulkSubscribeTopic(ctx context.Context, policyDef *resiliency.P Data: message.Event, Topic: topic, Metadata: message.Metadata, - ContentType: &message.ContentType, + ContentType: &msg.Entries[i].ContentType, }, route.DeadLetterTopic) } bulkResponses[i].EntryId = message.EntryId @@ -399,7 +399,7 @@ func (p *pubsub) publishBulkMessageHTTP(ctx context.Context, bulkSubCallData *bu } defer resp.Close() - statusCode := int(resp.Status().Code) + statusCode := int(resp.Status().GetCode()) for _, span := range spans { m := diag.ConstructSubscriptionSpanAttributes(psm.topic) @@ -573,7 +573,7 @@ func (p *pubsub) publishBulkMessageGRPC(ctx context.Context, bulkSubCallData *bu elapsed := diag.ElapsedSince(start) for _, span := range spans { - m := diag.ConstructSubscriptionSpanAttributes(envelope.Topic) + m := diag.ConstructSubscriptionSpanAttributes(envelope.GetTopic()) diag.AddAttributesToSpan(span, m) diag.UpdateSpanStatusFromGRPCError(span, err) } @@ -600,27 +600,27 @@ func (p *pubsub) publishBulkMessageGRPC(ctx context.Context, bulkSubCallData *bu hasAnyError := false for _, response := range res.GetStatuses() { - if _, ok := (*bscData.entryIdIndexMap)[response.EntryId]; ok { + if _, ok := (*bscData.entryIdIndexMap)[response.GetEntryId()]; ok { switch response.GetStatus() { case runtimev1pb.TopicEventResponse_SUCCESS: //nolint:nosnakecase // on uninitialized status, this is the case it defaults to as an uninitialized status defaults to 0 which is // success from protobuf definition bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Success)] += 1 - entryRespReceived[response.EntryId] = true - addBulkResponseEntry(bulkResponses, response.EntryId, nil) + entryRespReceived[response.GetEntryId()] = true + addBulkResponseEntry(bulkResponses, response.GetEntryId(), nil) case runtimev1pb.TopicEventResponse_RETRY: //nolint:nosnakecase bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += 1 - entryRespReceived[response.EntryId] = true - addBulkResponseEntry(bulkResponses, response.EntryId, - fmt.Errorf("RETRY status returned from app while processing pub/sub event for entry id: %v", response.EntryId)) + entryRespReceived[response.GetEntryId()] = true + addBulkResponseEntry(bulkResponses, response.GetEntryId(), + fmt.Errorf("RETRY status returned from app while processing pub/sub event for entry id: %v", response.GetEntryId())) hasAnyError = true case runtimev1pb.TopicEventResponse_DROP: //nolint:nosnakecase - log.Warnf("DROP status returned from app while processing pub/sub event for entry id: %v", response.EntryId) + log.Warnf("DROP status returned from app while processing pub/sub 
event for entry id: %v", response.GetEntryId()) bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)] += 1 - entryRespReceived[response.EntryId] = true - addBulkResponseEntry(bulkResponses, response.EntryId, nil) + entryRespReceived[response.GetEntryId()] = true + addBulkResponseEntry(bulkResponses, response.GetEntryId(), nil) if deadLetterTopic != "" { - msg := psm.pubSubMessages[(*bscData.entryIdIndexMap)[response.EntryId]] + msg := psm.pubSubMessages[(*bscData.entryIdIndexMap)[response.GetEntryId()]] _ = p.sendToDeadLetter(ctx, bscData.psName, &contribpubsub.NewMessage{ Data: msg.entry.Event, Topic: bscData.topic, @@ -631,13 +631,13 @@ func (p *pubsub) publishBulkMessageGRPC(ctx context.Context, bulkSubCallData *bu default: // Consider unknown status field as error and retry bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += 1 - entryRespReceived[response.EntryId] = true - addBulkResponseEntry(bulkResponses, response.EntryId, - fmt.Errorf("unknown status returned from app while processing pub/sub event for entry id %v: %v", response.EntryId, response.GetStatus())) + entryRespReceived[response.GetEntryId()] = true + addBulkResponseEntry(bulkResponses, response.GetEntryId(), + fmt.Errorf("unknown status returned from app while processing pub/sub event for entry id %v: %v", response.GetEntryId(), response.GetStatus())) hasAnyError = true } } else { - log.Warnf("Invalid entry id received from app while processing pub/sub event %v", response.EntryId) + log.Warnf("Invalid entry id received from app while processing pub/sub event %v", response.GetEntryId()) continue } } diff --git a/pkg/runtime/processor/pubsub/bulk_subscriber_test.go b/pkg/runtime/processor/pubsub/bulk_subscriber_test.go index 6cd69443c73..f6d34206d05 100644 --- a/pkg/runtime/processor/pubsub/bulk_subscriber_test.go +++ b/pkg/runtime/processor/pubsub/bulk_subscriber_test.go @@ -54,20 +54,23 @@ import ( const ( TestRuntimeConfigID = "consumer0" - data1 string = `{"orderId":"1"}` - data2 string = `{"orderId":"2"}` - data3 string = `{"orderId":"3"}` - data4 string = `{"orderId":"4"}` - data5 string = `{"orderId":"5"}` - data6 string = `{"orderId":"6"}` - data7 string = `{"orderId":"7"}` - data8 string = `{"orderId":"8"}` - data9 string = `` - data10 string = `{"orderId":"10"}` - ext1Key string = "ext1Key" - ext1Value string = "ext1Value" - ext2Key string = "ext2Key" - ext2Value string = "ext2Value" + eventKey = `"event":` + + data1 string = `{"orderId":"1"}` + data2 string = `{"orderId":"2"}` + data3 string = `{"orderId":"3"}` + data4 string = `{"orderId":"4"}` + data5 string = `{"orderId":"5"}` + data6 string = `{"orderId":"6"}` + data7 string = `{"orderId":"7"}` + data8 string = `{"orderId":"8"}` + data9 string = `` + data10 string = `{"orderId":"10"}` + ext1Key string = "ext1Key" + ext1Value string = "ext1Value" + ext2Key string = "ext2Key" + ext2Value string = "ext2Value" + //nolint:goconst order1 string = `{"data":` + data1 + `,"datacontenttype":"application/json","` + ext1Key + `":"` + ext1Value + `","id":"9b6767c3-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"type1"}` order2 string = `{"data":` + data2 + `,"datacontenttype":"application/json","` + ext2Key + `":"` + ext2Value + 
`","id":"993f4e4a-05e5-4772-94a4-e899b1af0131","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","traceparent":"00-1343b02c3af4f9b352d4cb83d6c8cb81-82a64f8c4433e2c4-01","tracestate":"","type":"type2"}` order3 string = `{"data":` + data3 + `,"datacontenttype":"application/json","` + ext1Key + `":"` + ext1Value + `","id":"6767010u-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"type1"}` @@ -174,14 +177,14 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) err := ps.Publish(context.TODO(), &contribpubsub.PublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Data: []byte(`{"orderId":"1"}`), }) - assert.Error(t, err) + require.Error(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -226,7 +229,7 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) order := `{"data":{"orderId":1},"datacontenttype":"application/json","id":"8b540b03-04b5-4871-96ae-c6bde0d5e16d","pubsubname":"orderpubsub","source":"checkout","specversion":"1.0","topic":"orders","traceid":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","traceparent":"00-e61de949bb4de415a7af49fc86675648-ffb64972bb907224-01","tracestate":"","type":"com.dapr.event.sent"}` @@ -235,7 +238,7 @@ func TestBulkSubscribe(t *testing.T) { Topic: "topic0", Data: []byte(order), }) - assert.Error(t, err) + require.Error(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -243,7 +246,7 @@ func TestBulkSubscribe(t *testing.T) { assert.True(t, pubsubIns.isBulkSubscribe) reqs := mockAppChannel.GetInvokedRequest() mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Contains(t, string(reqs["orders"]), "\"event\":"+order) + assert.Contains(t, string(reqs["orders"]), eventKey+order) }) t.Run("bulk Subscribe multiple Messages at once for cloud events", func(t *testing.T) { @@ -286,7 +289,7 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp1, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) msgArr := getBulkMessageEntries(2) @@ -296,9 +299,9 @@ func TestBulkSubscribe(t *testing.T) { Entries: msgArr, }) - assert.Equal(t, 2, len(ms.GetBulkResponse().Statuses)) - assert.Error(t, ms.GetBulkResponse().Error) - assert.Nil(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) + assert.Len(t, ms.GetBulkResponse().Statuses, 2) + 
require.Error(t, ms.GetBulkResponse().Error) + require.NoError(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) @@ -307,8 +310,8 @@ func TestBulkSubscribe(t *testing.T) { assert.True(t, pubsubIns.isBulkSubscribe) reqs := mockAppChannel.GetInvokedRequest() mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Contains(t, string(reqs["orders"]), `"event":`+order1) - assert.Contains(t, string(reqs["orders"]), `"event":`+order2) + assert.Contains(t, string(reqs["orders"]), eventKey+order1) + assert.Contains(t, string(reqs["orders"]), eventKey+order2) fakeResp2 := invokev1.NewInvokeMethodResponse(404, "OK", nil) defer fakeResp2.Close() @@ -325,17 +328,17 @@ func TestBulkSubscribe(t *testing.T) { Entries: msgArr, }) - assert.Equal(t, 3, len(ms.GetBulkResponse().Statuses)) - assert.Nil(t, ms.GetBulkResponse().Error) - assert.Nil(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b", "333333c")) + assert.Len(t, ms.GetBulkResponse().Statuses, 3) + require.NoError(t, ms.GetBulkResponse().Error) + require.NoError(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b", "333333c")) assert.Equal(t, 2, pubsubIns.bulkPubCount["topic0"]) assert.True(t, pubsubIns.isBulkSubscribe) reqs = mockAppChannel1.GetInvokedRequest() mockAppChannel1.AssertNumberOfCalls(t, "InvokeMethod", 1) - assert.Contains(t, string(reqs["orders"]), `"event":`+order1) - assert.Contains(t, string(reqs["orders"]), `"event":`+order2) - assert.Contains(t, string(reqs["orders"]), `"event":`+order3) + assert.Contains(t, string(reqs["orders"]), eventKey+order1) + assert.Contains(t, string(reqs["orders"]), eventKey+order2) + assert.Contains(t, string(reqs["orders"]), eventKey+order3) fakeResp3 := invokev1.NewInvokeMethodResponse(400, "OK", nil) defer fakeResp3.Close() @@ -352,18 +355,18 @@ func TestBulkSubscribe(t *testing.T) { Entries: msgArr, }) - assert.Equal(t, 4, len(ms.GetBulkResponse().Statuses)) - assert.Error(t, ms.GetBulkResponse().Error) - assert.Nil(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b", "333333c", "4444444d")) + assert.Len(t, ms.GetBulkResponse().Statuses, 4) + require.Error(t, ms.GetBulkResponse().Error) + require.NoError(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b", "333333c", "4444444d")) assert.Equal(t, 3, pubsubIns.bulkPubCount["topic0"]) assert.True(t, pubsubIns.isBulkSubscribe) reqs = mockAppChannel2.GetInvokedRequest() mockAppChannel2.AssertNumberOfCalls(t, "InvokeMethod", 1) - assert.Contains(t, string(reqs["orders"]), `"event":`+order1) - assert.Contains(t, string(reqs["orders"]), `"event":`+order2) - assert.Contains(t, string(reqs["orders"]), `"event":`+order3) - assert.Contains(t, string(reqs["orders"]), `"event":`+order4) + assert.Contains(t, string(reqs["orders"]), eventKey+order1) + assert.Contains(t, string(reqs["orders"]), eventKey+order2) + assert.Contains(t, string(reqs["orders"]), eventKey+order3) + assert.Contains(t, string(reqs["orders"]), eventKey+order4) mockAppChannel3 := new(channelt.MockAppChannel) mockAppChannel3.Init() @@ -377,15 +380,15 @@ func TestBulkSubscribe(t *testing.T) { Entries: msgArr, }) - assert.Equal(t, 1, len(ms.GetBulkResponse().Statuses)) - assert.Error(t, ms.GetBulkResponse().Error) - assert.Nil(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a")) + assert.Len(t, ms.GetBulkResponse().Statuses, 1) + require.Error(t, 
ms.GetBulkResponse().Error) + require.NoError(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a")) assert.Equal(t, 4, pubsubIns.bulkPubCount["topic0"]) assert.True(t, pubsubIns.isBulkSubscribe) reqs = mockAppChannel3.GetInvokedRequest() mockAppChannel3.AssertNumberOfCalls(t, "InvokeMethod", 1) - assert.Contains(t, string(reqs["orders"]), `"event":`+order1) + assert.Contains(t, string(reqs["orders"]), eventKey+order1) }) t.Run("bulk Subscribe events on different paths", func(t *testing.T) { @@ -439,7 +442,7 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), mock.Anything).Return(fakeResp, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) msgArr := getBulkMessageEntries(2) @@ -448,7 +451,7 @@ func TestBulkSubscribe(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) @@ -457,10 +460,10 @@ func TestBulkSubscribe(t *testing.T) { assert.True(t, pubsubIns.isBulkSubscribe) reqs := mockAppChannel.GetInvokedRequest() mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) - assert.Contains(t, string(reqs["orders1"]), "\"event\":"+order1) - assert.NotContains(t, string(reqs["orders1"]), "\"event\":"+order2) - assert.Contains(t, string(reqs["orders2"]), "\"event\":"+order2) - assert.NotContains(t, string(reqs["orders2"]), "\"event\":"+order1) + assert.Contains(t, string(reqs["orders1"]), eventKey+order1) + assert.NotContains(t, string(reqs["orders1"]), eventKey+order2) + assert.Contains(t, string(reqs["orders2"]), eventKey+order2) + assert.NotContains(t, string(reqs["orders2"]), eventKey+order1) }) t.Run("verify Responses when bulk Subscribe events on different paths", func(t *testing.T) { @@ -513,7 +516,7 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) msgArr := getBulkMessageEntries(10) responseItemsOrders1 := contribpubsub.AppBulkResponse{ @@ -556,7 +559,7 @@ func TestBulkSubscribe(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) @@ -565,14 +568,14 @@ func TestBulkSubscribe(t *testing.T) { assert.True(t, pubsubIns.isBulkSubscribe) reqs := mockAppChannel.GetInvokedRequest() mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) - assert.True(t, verifyIfEventContainsStrings(reqs["orders1"], "\"event\":"+order1, - "\"event\":"+order3, "\"event\":"+order5, "\"event\":"+order7, "\"event\":"+order8, "\"event\":"+order9)) - assert.True(t, verifyIfEventNotContainsStrings(reqs["orders1"], "\"event\":"+order2, - "\"event\":"+order4, "\"event\":"+order6, "\"event\":"+order10)) - assert.True(t, verifyIfEventContainsStrings(reqs["orders2"], "\"event\":"+order2, - "\"event\":"+order4, "\"event\":"+order6, "\"event\":"+order10)) - assert.True(t, verifyIfEventNotContainsStrings(reqs["orders2"], "\"event\":"+order1, - "\"event\":"+order3, "\"event\":"+order5, "\"event\":"+order7, "\"event\":"+order8, "\"event\":"+order9)) + assert.True(t, 
verifyIfEventContainsStrings(reqs["orders1"], eventKey+order1, + eventKey+order3, eventKey+order5, eventKey+order7, eventKey+order8, eventKey+order9)) + assert.True(t, verifyIfEventNotContainsStrings(reqs["orders1"], eventKey+order2, + eventKey+order4, eventKey+order6, eventKey+order10)) + assert.True(t, verifyIfEventContainsStrings(reqs["orders2"], eventKey+order2, + eventKey+order4, eventKey+order6, eventKey+order10)) + assert.True(t, verifyIfEventNotContainsStrings(reqs["orders2"], eventKey+order1, + eventKey+order3, eventKey+order5, eventKey+order7, eventKey+order8, eventKey+order9)) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -631,7 +634,7 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) msgArr := getBulkMessageEntries(4) msgArr[0].EntryId = "" @@ -657,7 +660,7 @@ func TestBulkSubscribe(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) @@ -666,10 +669,10 @@ func TestBulkSubscribe(t *testing.T) { assert.True(t, pubsubIns.isBulkSubscribe) reqs := mockAppChannel.GetInvokedRequest() mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.True(t, verifyIfEventContainsStrings(reqs["orders"], "\"event\":"+order2, - "\"event\":"+order4)) - assert.True(t, verifyIfEventNotContainsStrings(reqs["orders"], "\"event\":"+order1, - "\"event\":"+order3)) + assert.True(t, verifyIfEventContainsStrings(reqs["orders"], eventKey+order2, + eventKey+order4)) + assert.True(t, verifyIfEventNotContainsStrings(reqs["orders"], eventKey+order1, + eventKey+order3)) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -722,7 +725,7 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) msgArr := getBulkMessageEntries(5) @@ -753,7 +756,7 @@ func TestBulkSubscribe(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) @@ -762,8 +765,8 @@ func TestBulkSubscribe(t *testing.T) { assert.True(t, pubsubIns.isBulkSubscribe) reqs := mockAppChannel.GetInvokedRequest() mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.True(t, verifyIfEventContainsStrings(reqs["orders"], "\"event\":"+order1, - "\"event\":"+order2, "\"event\":"+order3, "\"event\":"+order4, "\"event\":"+order5)) + assert.True(t, verifyIfEventContainsStrings(reqs["orders"], eventKey+order1, + eventKey+order2, eventKey+order3, eventKey+order4, eventKey+order5)) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -817,7 +820,7 @@ func TestBulkSubscribe(t *testing.T) { mockAppChannel.On("InvokeMethod", mock.MatchedBy(matchContextInterface), matchDaprRequestMethod("dapr/subscribe")).Return(fakeResp, nil) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - 
assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) msgArr := getBulkMessageEntries(5) @@ -846,7 +849,7 @@ func TestBulkSubscribe(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) @@ -855,8 +858,8 @@ func TestBulkSubscribe(t *testing.T) { assert.True(t, pubsubIns.isBulkSubscribe) reqs := mockAppChannel.GetInvokedRequest() mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.True(t, verifyIfEventContainsStrings(reqs["orders"], "\"event\":"+order1, - "\"event\":"+order2, "\"event\":"+order3, "\"event\":"+order4, "\"event\":"+order5)) + assert.True(t, verifyIfEventContainsStrings(reqs["orders"], eventKey+order1, + eventKey+order2, eventKey+order3, eventKey+order4, eventKey+order5)) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -967,18 +970,18 @@ func TestBulkSubscribeGRPC(t *testing.T) { ps.grpc = grpc require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) _, err = ps.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) - assert.Equal(t, 2, len(ms.GetBulkResponse().Statuses)) - assert.Nil(t, ms.GetBulkResponse().Error) - assert.Nil(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) + assert.Len(t, ms.GetBulkResponse().Statuses, 2) + require.NoError(t, ms.GetBulkResponse().Error) + require.NoError(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -1000,9 +1003,9 @@ func TestBulkSubscribeGRPC(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Equal(t, 2, len(ms.GetBulkResponse().Statuses)) - assert.Nil(t, ms.GetBulkResponse().Error) - assert.Nil(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) + assert.Len(t, ms.GetBulkResponse().Statuses, 2) + require.NoError(t, ms.GetBulkResponse().Error) + require.NoError(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) mockServer.Error = status.Error(codes.Unknown, "unknown error") ps.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ @@ -1010,9 +1013,9 @@ func TestBulkSubscribeGRPC(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Equal(t, 2, len(ms.GetBulkResponse().Statuses)) - assert.Error(t, ms.GetBulkResponse().Error) - assert.Nil(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) + assert.Len(t, ms.GetBulkResponse().Statuses, 2) + require.Error(t, ms.GetBulkResponse().Error) + require.NoError(t, assertItemExistsOnce(ms.GetBulkResponse().Statuses, "1111111a", "2222222b")) }) t.Run("GRPC - bulk Subscribe cloud event Message on different paths and verify response", func(t *testing.T) { @@ -1131,7 +1134,7 @@ func TestBulkSubscribeGRPC(t *testing.T) { Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -1232,14 +1235,14 @@ func TestBulkSubscribeGRPC(t *testing.T) { 
ps.grpc = grpc require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) _, err = ps.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -1342,14 +1345,14 @@ func TestBulkSubscribeGRPC(t *testing.T) { ps.grpc = grpc require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) _, err = ps.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -1447,14 +1450,14 @@ func TestBulkSubscribeGRPC(t *testing.T) { ps.grpc = grpc require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) _, err = ps.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -1537,14 +1540,14 @@ func TestBulkSubscribeGRPC(t *testing.T) { ps.grpc = grpc require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) _, err = ps.BulkPublish(context.TODO(), &contribpubsub.BulkPublishRequest{ PubsubName: testBulkSubscribePubsub, Topic: "topic0", Entries: msgArr, }) - assert.Nil(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testBulkSubscribePubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -1561,7 +1564,7 @@ func TestBulkSubscribeGRPC(t *testing.T) { func startTestAppCallbackAlphaGRPCServer(t *testing.T, port int, mockServer *channelt.MockServer) *grpc.Server { lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - assert.NoError(t, err) + require.NoError(t, err) grpcServer := grpc.NewServer() go func() { runtimev1pb.RegisterAppCallbackServer(grpcServer, mockServer) @@ -1628,8 +1631,8 @@ func verifyBulkSubscribeRequest(expectedData []string, expectedExtension Expecte actual *runtimev1pb.TopicEventBulkRequest, ) bool { for i, expectedEntryReq := range expectedData { - if expectedEntryReq != string(actual.Entries[i].GetCloudEvent().GetData()) || - actual.Entries[i].GetCloudEvent().GetExtensions().GetFields()[expectedExtension.extKey].GetStringValue() != expectedExtension.extValue { + if expectedEntryReq != string(actual.GetEntries()[i].GetCloudEvent().GetData()) || + actual.GetEntries()[i].GetCloudEvent().GetExtensions().GetFields()[expectedExtension.extKey].GetStringValue() != expectedExtension.extValue { return false } } @@ -1802,14 +1805,14 @@ func TestPubSubDeadLetter(t *testing.T) { Return(nil, errors.New("failed to send")) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, 
ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) err := ps.Publish(context.TODO(), &contribpubsub.PublishRequest{ PubsubName: testDeadLetterPubsub, Topic: "topic0", Data: []byte(`{"id":"1"}`), }) - assert.NoError(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testDeadLetterPubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) @@ -1858,19 +1861,19 @@ func TestPubSubDeadLetter(t *testing.T) { Return(nil, errors.New("failed to send")) require.NoError(t, ps.Init(context.TODO(), pubsubComponent)) - assert.NoError(t, ps.StartSubscriptions(context.TODO())) + require.NoError(t, ps.StartSubscriptions(context.TODO())) err := ps.Publish(context.TODO(), &contribpubsub.PublishRequest{ PubsubName: testDeadLetterPubsub, Topic: "topic0", Data: []byte(`{"id":"1"}`), }) - assert.NoError(t, err) + require.NoError(t, err) pubSub, ok := ps.compStore.GetPubSub(testDeadLetterPubsub) require.True(t, ok) pubsubIns := pubSub.Component.(*mockSubscribePubSub) // Consider of resiliency, publish message may retry in some cases, make sure the pub count is greater than 1. - assert.True(t, pubsubIns.pubCount["topic0"] >= 1) + assert.GreaterOrEqual(t, pubsubIns.pubCount["topic0"], 1) // Make sure every message that is sent to topic0 is sent to its dead letter topic1. assert.Equal(t, pubsubIns.pubCount["topic0"], pubsubIns.pubCount["topic1"]) // Except of the one getting config from app, make sure each publish will result to twice subscribe call @@ -1885,7 +1888,7 @@ func matchContextInterface(v any) bool { func matchDaprRequestMethod(method string) any { return mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - if req == nil || req.Message() == nil || req.Message().Method != method { + if req == nil || req.Message() == nil || req.Message().GetMethod() != method { return false } return true diff --git a/pkg/runtime/processor/pubsub/bulksub_resiliency_test.go b/pkg/runtime/processor/pubsub/bulksub_resiliency_test.go index 35622330c70..915adb61429 100644 --- a/pkg/runtime/processor/pubsub/bulksub_resiliency_test.go +++ b/pkg/runtime/processor/pubsub/bulksub_resiliency_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" contribpubsub "github.com/dapr/components-contrib/pubsub" resiliencyV1alpha "github.com/dapr/dapr/pkg/apis/resiliency/v1alpha1" @@ -235,7 +236,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) @@ -251,7 +252,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -279,7 +280,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "9999999i": 1, "10101010j": 2, }, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -304,7 +305,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req 
*invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) @@ -320,7 +321,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -348,7 +349,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "9999999i": 3, "10101010j": 3, }, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -373,7 +374,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) @@ -389,7 +390,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -417,7 +418,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "9999999i": 2, "10101010j": 2, }, ts.entryIdRetryTimes) - assert.Nil(t, e) + require.NoError(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -442,7 +443,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) // After(3 * time.Second) @@ -458,7 +459,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -486,7 +487,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "9999999i": 1, "10101010j": 1, }, ts.entryIdRetryTimes) - assert.Nil(t, e) + require.NoError(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -512,7 +513,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ). 
After(3 * time.Second) @@ -527,7 +528,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { in := getInput() b, e := ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) expectedResponse := BulkResponseExpectation{ Responses: []BulkResponseEntryExpectation{ @@ -543,8 +544,8 @@ func TestBulkSubscribeResiliency(t *testing.T) { {EntryId: "10101010j", IsError: true}, }, } - assert.NotNil(t, e) - assert.ErrorIs(t, e, context.DeadlineExceeded) + require.Error(t, e) + require.ErrorIs(t, e, context.DeadlineExceeded) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -569,7 +570,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { @@ -617,18 +618,18 @@ func TestBulkSubscribeResiliency(t *testing.T) { "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -654,7 +655,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { @@ -702,18 +703,18 @@ func TestBulkSubscribeResiliency(t *testing.T) { "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -739,7 +740,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { @@ -787,9 +788,9 @@ func TestBulkSubscribeResiliency(t *testing.T) { "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) 
- assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.Nil(t, e) + require.NoError(t, e) // assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -815,7 +816,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { @@ -863,9 +864,9 @@ func TestBulkSubscribeResiliency(t *testing.T) { "10101010j": 2, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) // assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) @@ -900,9 +901,9 @@ func TestBulkSubscribeResiliency(t *testing.T) { "10101010j": 3, } mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.Nil(t, e) + require.NoError(t, e) // assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -929,7 +930,7 @@ func TestBulkSubscribeResiliency(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ). After(3 * time.Second) @@ -965,15 +966,15 @@ func TestBulkSubscribeResiliency(t *testing.T) { {EntryId: "10101010j", IsError: true}, }, } - assert.Equal(t, 10, len(*b)) - assert.NotNil(t, e) + assert.Len(t, *b, 10) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) - assert.Equal(t, 10, len(*b)) - assert.NotNil(t, e) + assert.Len(t, *b, 10) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) @@ -1001,7 +1002,7 @@ func TestBulkSubscribeResiliencyStateConversionsFromHalfOpen(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ) mockee.RunFn = func(args mock.Arguments) { @@ -1051,9 +1052,9 @@ func TestBulkSubscribeResiliencyStateConversionsFromHalfOpen(t *testing.T) { } // 2 invoke calls should be made here, as the circuit breaker becomes open mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 2) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) @@ -1090,9 +1091,9 @@ func TestBulkSubscribeResiliencyStateConversionsFromHalfOpen(t *testing.T) { // as this operation is partial failure case and circuit breaker is half-open, this failure // would mark state as open mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) - assert.Equal(t, 10, 
len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) @@ -1100,9 +1101,9 @@ func TestBulkSubscribeResiliencyStateConversionsFromHalfOpen(t *testing.T) { b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 3) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.NotNil(t, e) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) @@ -1139,9 +1140,9 @@ func TestBulkSubscribeResiliencyStateConversionsFromHalfOpen(t *testing.T) { // As this operation succeeds with all entries passed, circuit breaker should be closed // as successCount becomes equal or greater than maxRequests mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 4) - assert.Equal(t, 10, len(*b)) + assert.Len(t, *b, 10) assertRetryCount(t, expectedCBRetryCount, ts.entryIdRetryTimes) - assert.Nil(t, e) + require.NoError(t, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) } @@ -1169,7 +1170,7 @@ func TestBulkSubscribeResiliencyWithLongRetries(t *testing.T) { "InvokeMethod", mock.MatchedBy(matchContextInterface), mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - return req.Message().Method == orders1 + return req.Message().GetMethod() == orders1 }), ). After(3 * time.Second) @@ -1205,15 +1206,15 @@ func TestBulkSubscribeResiliencyWithLongRetries(t *testing.T) { {EntryId: "10101010j", IsError: true}, }, } - assert.Equal(t, 10, len(*b)) - assert.NotNil(t, e) + assert.Len(t, *b, 10) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) b, e = ps.applyBulkSubscribeResiliency(context.TODO(), &in.bscData, in.pbsm, "dlq", orders1, policyDef, true, in.envelope) - assert.Equal(t, 10, len(*b)) - assert.NotNil(t, e) + assert.Len(t, *b, 10) + require.Error(t, e) assert.Equal(t, breaker.ErrOpenState, e) assert.True(t, verifyBulkSubscribeResponses(expectedResponse, *b)) }) diff --git a/pkg/runtime/processor/pubsub/publish.go b/pkg/runtime/processor/pubsub/publish.go index f19e9eb6162..82b52abca54 100644 --- a/pkg/runtime/processor/pubsub/publish.go +++ b/pkg/runtime/processor/pubsub/publish.go @@ -125,7 +125,7 @@ func (p *pubsub) publishMessageHTTP(ctx context.Context, msg *subscribedMessage) } defer resp.Close() - statusCode := int(resp.Status().Code) + statusCode := int(resp.Status().GetCode()) if span != nil { m := diag.ConstructSubscriptionSpanAttributes(msg.topic) @@ -213,7 +213,7 @@ func (p *pubsub) publishMessageGRPC(ctx context.Context, msg *subscribedMessage) } else if data, ok := cloudEvent[contribpubsub.DataField]; ok && data != nil { envelope.Data = nil - if contenttype.IsStringContentType(envelope.DataContentType) { + if contenttype.IsStringContentType(envelope.GetDataContentType()) { switch v := data.(type) { case string: envelope.Data = []byte(v) @@ -223,7 +223,7 @@ func (p *pubsub) publishMessageGRPC(ctx context.Context, msg *subscribedMessage) diag.DefaultComponentMonitoring.PubsubIngressEvent(ctx, msg.pubsub, strings.ToLower(string(contribpubsub.Retry)), msg.topic, 0) return fmt.Errorf("error returned from app while processing pub/sub event: %w", 
rterrors.NewRetriable(errUnexpectedEnvelopeData)) } - } else if contenttype.IsJSONContentType(envelope.DataContentType) || contenttype.IsCloudEventContentType(envelope.DataContentType) { + } else if contenttype.IsJSONContentType(envelope.GetDataContentType()) || contenttype.IsCloudEventContentType(envelope.GetDataContentType()) { envelope.Data, _ = json.Marshal(data) } } @@ -269,7 +269,7 @@ func (p *pubsub) publishMessageGRPC(ctx context.Context, msg *subscribedMessage) elapsed := diag.ElapsedSince(start) if span != nil { - m := diag.ConstructSubscriptionSpanAttributes(envelope.Topic) + m := diag.ConstructSubscriptionSpanAttributes(envelope.GetTopic()) diag.AddAttributesToSpan(span, m) diag.UpdateSpanStatusFromGRPCError(span, err) span.End() diff --git a/pkg/runtime/processor/pubsub/publish_test.go b/pkg/runtime/processor/pubsub/publish_test.go index 73448776058..d8bd6016bcf 100644 --- a/pkg/runtime/processor/pubsub/publish_test.go +++ b/pkg/runtime/processor/pubsub/publish_test.go @@ -105,7 +105,7 @@ func TestErrorPublishedNonCloudEventHTTP(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.Equal(t, nil, err) + require.NoError(t, err) }) t.Run("ok with retry", func(t *testing.T) { @@ -125,7 +125,7 @@ func TestErrorPublishedNonCloudEventHTTP(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.Error(t, err) + require.Error(t, err) }) t.Run("ok with drop", func(t *testing.T) { @@ -165,7 +165,7 @@ func TestErrorPublishedNonCloudEventHTTP(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.Error(t, err) + require.Error(t, err) }) t.Run("not found response", func(t *testing.T) { @@ -183,7 +183,7 @@ func TestErrorPublishedNonCloudEventHTTP(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.NoError(t, err) + require.NoError(t, err) }) } @@ -271,9 +271,9 @@ func TestErrorPublishedNonCloudEventGRPC(t *testing.T) { err := ps.publishMessageGRPC(context.Background(), testPubSubMessage) if tc.ExpectError { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } @@ -285,7 +285,7 @@ func TestOnNewPublishedMessage(t *testing.T) { envelope := contribpubsub.NewCloudEventsEnvelope("", "", contribpubsub.DefaultCloudEventType, "", topic, TestSecondPubsubName, "", []byte("Test Message"), "", "") b, err := json.Marshal(envelope) - assert.NoError(t, err) + require.NoError(t, err) testPubSubMessage := &subscribedMessage{ cloudEvent: envelope, @@ -337,7 +337,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.NoError(t, err) + require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -358,7 +358,7 @@ func TestOnNewPublishedMessage(t *testing.T) { []byte("Test Message"), "", "") delete(envelopeNoTraceID, contribpubsub.TraceIDField) bNoTraceID, err := json.Marshal(envelopeNoTraceID) - assert.NoError(t, err) + require.NoError(t, err) message := &subscribedMessage{ cloudEvent: envelopeNoTraceID, @@ -380,7 +380,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err = ps.publishMessageHTTP(context.Background(), message) // assert - assert.NoError(t, err) + require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -400,7 +400,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err := 
ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.NoError(t, err) + require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -420,7 +420,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.NoError(t, err) + require.NoError(t, err) mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -483,7 +483,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.Error(t, err, "expected error on unknown status") + require.Error(t, err, "expected error on unknown status") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -503,7 +503,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.NoError(t, err, "expected no error on empty status") + require.NoError(t, err, "expected no error on empty status") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -523,7 +523,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.Nil(t, err, "expected no error on unknown status") + require.NoError(t, err, "expected no error on unknown status") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -558,7 +558,7 @@ func TestOnNewPublishedMessage(t *testing.T) { err := ps.publishMessageHTTP(context.Background(), testPubSubMessage) // assert - assert.Nil(t, err, "expected error to be nil") + require.NoError(t, err, "expected error to be nil") mockAppChannel.AssertNumberOfCalls(t, "InvokeMethod", 1) }) @@ -599,7 +599,7 @@ func TestOnNewPublishedMessageGRPC(t *testing.T) { envelope["customArray"] = []interface{}{"a", "b", 789, 3.1415} envelope["customMap"] = map[string]interface{}{"a": "b", "c": 456} b, err := json.Marshal(envelope) - assert.NoError(t, err) + require.NoError(t, err) testPubSubMessage := &subscribedMessage{ cloudEvent: envelope, @@ -619,7 +619,7 @@ func TestOnNewPublishedMessageGRPC(t *testing.T) { envelope["customArray"] = []interface{}{"a", "b", 789, 3.1415} envelope["customMap"] = map[string]interface{}{"a": "b", "c": 456} base64, err := json.Marshal(envelope) - assert.NoError(t, err) + require.NoError(t, err) testPubSubMessageBase64 := &subscribedMessage{ cloudEvent: envelope, @@ -788,7 +788,7 @@ func TestOnNewPublishedMessageGRPC(t *testing.T) { if tc.expectedError != nil { assert.Equal(t, err.Error(), tc.expectedError.Error()) } else { - assert.NoError(t, err) + require.NoError(t, err) } }) } diff --git a/pkg/runtime/processor/pubsub/pubsub.go b/pkg/runtime/processor/pubsub/pubsub.go index 90e7c83326e..42c2a0d0fb2 100644 --- a/pkg/runtime/processor/pubsub/pubsub.go +++ b/pkg/runtime/processor/pubsub/pubsub.go @@ -276,7 +276,7 @@ func extractCloudEvent(event map[string]interface{}) (runtimev1pb.TopicEventBulk if data, ok := event[contribpubsub.DataField]; ok && data != nil { envelope.Data = nil - if contenttype.IsStringContentType(envelope.DataContentType) { + if contenttype.IsStringContentType(envelope.GetDataContentType()) { switch v := data.(type) { case string: envelope.Data = []byte(v) @@ -285,7 +285,7 @@ func extractCloudEvent(event map[string]interface{}) (runtimev1pb.TopicEventBulk default: return runtimev1pb.TopicEventBulkRequestEntry_CloudEvent{}, errUnexpectedEnvelopeData //nolint:nosnakecase } - } else if 
contenttype.IsJSONContentType(envelope.DataContentType) || contenttype.IsCloudEventContentType(envelope.DataContentType) { + } else if contenttype.IsJSONContentType(envelope.GetDataContentType()) || contenttype.IsCloudEventContentType(envelope.GetDataContentType()) { envelope.Data, _ = json.Marshal(data) } } diff --git a/pkg/runtime/processor/pubsub/pubsub_test.go b/pkg/runtime/processor/pubsub/pubsub_test.go index 356b155d546..27553785100 100644 --- a/pkg/runtime/processor/pubsub/pubsub_test.go +++ b/pkg/runtime/processor/pubsub/pubsub_test.go @@ -163,10 +163,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -195,10 +195,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -221,10 +221,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -246,8 +246,8 @@ func TestInitPubSub(t *testing.T) { Channels: new(channels.Channels), }) routes, err := pst.topicRoutes(context.Background()) - assert.NoError(t, err) - assert.Equal(t, 0, len(routes)) + require.NoError(t, err) + assert.Empty(t, routes) }) t.Run("load declarative subscription, no scopes", func(t *testing.T) { @@ -268,7 +268,7 @@ func TestInitPubSub(t *testing.T) { s := testDeclarativeSubscription() cleanup, err := writeComponentToDisk(s, "sub.yaml") - assert.NoError(t, err) + require.NoError(t, err) defer cleanup() pst.resourcesPath = []string{resourcesDir} @@ -301,7 +301,7 @@ func TestInitPubSub(t *testing.T) { s.Scopes = []string{TestRuntimeConfigID} cleanup, err := writeComponentToDisk(s, "sub.yaml") - assert.NoError(t, err) + require.NoError(t, err) defer cleanup() pst.resourcesPath = []string{resourcesDir} @@ -334,12 +334,12 @@ func TestInitPubSub(t *testing.T) { s.Scopes = []string{"scope1"} cleanup, err := writeComponentToDisk(s, "sub.yaml") - assert.NoError(t, err) + require.NoError(t, err) defer cleanup() pst.resourcesPath = []string{resourcesDir} subs := pst.declarativeSubscriptions(context.Background()) - assert.Len(t, subs, 0) + assert.Empty(t, subs) }) t.Run("test subscribe, app allowed 1 topic", func(t *testing.T) { @@ -359,10 +359,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -389,10 +389,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + 
require.NoError(t, err) } - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -419,10 +419,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -449,10 +449,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -479,7 +479,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } // assert @@ -507,7 +507,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } // assert @@ -536,10 +536,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.Error(t, ps.StartSubscriptions(context.Background())) + require.Error(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -567,10 +567,10 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.Error(t, ps.StartSubscriptions(context.Background())) + require.Error(t, ps.StartSubscriptions(context.Background())) // assert mockPubSub.AssertNumberOfCalls(t, "Init", 1) @@ -586,7 +586,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{Component: &mockPublishPubSub{}}) @@ -606,7 +606,7 @@ func TestInitPubSub(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{Component: &mockPublishPubSub{}}) @@ -627,7 +627,7 @@ func TestInitPubSub(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) }) @@ -637,7 +637,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{ @@ -661,7 +661,7 @@ func TestInitPubSub(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{ @@ -686,7 +686,7 @@ func TestInitPubSub(t *testing.T) { }, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) }) @@ -696,7 +696,7 @@ func TestInitPubSub(t *testing.T) { // act for 
_, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - require.Nil(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{ @@ -719,7 +719,7 @@ func TestInitPubSub(t *testing.T) { }, }, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Empty(t, res) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{ @@ -739,7 +739,7 @@ func TestInitPubSub(t *testing.T) { }, }, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Empty(t, res) }) @@ -749,7 +749,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - require.Nil(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{ @@ -772,7 +772,7 @@ func TestInitPubSub(t *testing.T) { }, }, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Empty(t, res) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{ @@ -792,7 +792,7 @@ func TestInitPubSub(t *testing.T) { }, }, }) - assert.NotNil(t, err) + require.Error(t, err) assert.Empty(t, res) }) @@ -813,7 +813,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{ @@ -827,7 +827,7 @@ func TestInitPubSub(t *testing.T) { Metadata: md, }) - assert.NoError(t, err) + require.NoError(t, err) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{ Component: &mockPublishPubSub{}, @@ -837,7 +837,7 @@ func TestInitPubSub(t *testing.T) { Topic: "topic1", }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("test publish, topic protected, with scopes, publish succeeds", func(t *testing.T) { @@ -857,7 +857,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - assert.NoError(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{ @@ -873,7 +873,7 @@ func TestInitPubSub(t *testing.T) { Metadata: md, }) - assert.NoError(t, err) + require.NoError(t, err) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{ Component: &mockPublishPubSub{}, @@ -885,7 +885,7 @@ func TestInitPubSub(t *testing.T) { Topic: "topic1", }) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("test publish, topic not allowed", func(t *testing.T) { @@ -905,7 +905,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - require.Nil(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{ @@ -916,7 +916,7 @@ func TestInitPubSub(t *testing.T) { PubsubName: TestPubsubName, Topic: "topic5", }) - assert.NotNil(t, err) + require.Error(t, err) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{ Component: &mockPublishPubSub{}, @@ -926,7 +926,7 @@ func TestInitPubSub(t *testing.T) { PubsubName: TestSecondPubsubName, Topic: "topic5", }) - assert.NotNil(t, err) + require.Error(t, err) }) t.Run("test publish, topic protected, no scopes, publish fails", func(t *testing.T) { @@ -946,7 +946,7 @@ func TestInitPubSub(t *testing.T) { // act for _, comp := range pubsubComponents { err := ps.Init(context.Background(), comp) - require.Nil(t, err) + require.NoError(t, err) } ps.compStore.AddPubSub(TestPubsubName, compstore.PubsubItem{ @@ -957,7 +957,7 @@ func 
TestInitPubSub(t *testing.T) { PubsubName: TestPubsubName, Topic: "topic1", }) - assert.NotNil(t, err) + require.Error(t, err) ps.compStore.AddPubSub(TestSecondPubsubName, compstore.PubsubItem{ Component: &mockPublishPubSub{}, @@ -967,7 +967,7 @@ func TestInitPubSub(t *testing.T) { PubsubName: TestSecondPubsubName, Topic: "topic1", }) - assert.NotNil(t, err) + require.Error(t, err) }) t.Run("test protected topics, no scopes, operation not allowed", func(t *testing.T) { @@ -1163,7 +1163,7 @@ func TestConsumerID(t *testing.T) { }) err := ps.Init(context.Background(), pubsubComponent) - assert.NoError(t, err) + require.NoError(t, err) } // helper to populate subscription array for 2 pubsubs. @@ -1351,7 +1351,7 @@ func TestPubsubWithResiliency(t *testing.T) { component.Spec.Type = "pubsub.failingPubsub" err := ps.Init(context.TODO(), component) - assert.NoError(t, err) + require.NoError(t, err) t.Run("pubsub publish retries with resiliency", func(t *testing.T) { req := &contribpubsub.PublishRequest{ @@ -1360,7 +1360,7 @@ func TestPubsubWithResiliency(t *testing.T) { } err := ps.Publish(context.Background(), req) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingPubsub.Failure.CallCount("failingTopic")) }) @@ -1374,7 +1374,7 @@ func TestPubsubWithResiliency(t *testing.T) { err := ps.Publish(context.Background(), req) end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingPubsub.Failure.CallCount("timeoutTopic")) assert.Less(t, end.Sub(start), time.Second*10) }) @@ -1408,7 +1408,7 @@ func TestPubsubWithResiliency(t *testing.T) { defer cancel() err := ps.beginPubSub(ctx, "failPubsub") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, failingAppChannel.Failure.CallCount("failingSubTopic")) }) @@ -1440,7 +1440,7 @@ func TestPubsubWithResiliency(t *testing.T) { err := ps.beginPubSub(ctx, "failPubsub") end := time.Now() - assert.Error(t, err) + require.Error(t, err) assert.Equal(t, 2, failingAppChannel.Failure.CallCount("timeoutSubTopic")) assert.Less(t, end.Sub(start), time.Second*10) }) @@ -1501,11 +1501,11 @@ func TestPubsubLifecycle(t *testing.T) { }, "mockPubSubBeta") err := ps.Init(context.Background(), comp1) - assert.NoError(t, err) + require.NoError(t, err) err = ps.Init(context.Background(), comp2) - assert.NoError(t, err) + require.NoError(t, err) err = ps.Init(context.Background(), comp3) - assert.NoError(t, err) + require.NoError(t, err) forEachPubSub := func(f func(name string, comp *daprt.InMemoryPubsub)) int { i := 0 @@ -1595,7 +1595,7 @@ func TestPubsubLifecycle(t *testing.T) { setTopicRoutes() subscriptionsCh = make(chan struct{}, 5) - assert.NoError(t, ps.StartSubscriptions(context.Background())) + require.NoError(t, ps.StartSubscriptions(context.Background())) done := forEachPubSub(func(name string, comp *daprt.InMemoryPubsub) { switch name { @@ -1619,7 +1619,7 @@ func TestPubsubLifecycle(t *testing.T) { "expected %v to equal %v", subscriptions["mockPubSub1"], []string{"topic1"}) assert.True(t, reflect.DeepEqual(subscriptions["mockPubSub2"], []string{"topic2", "topic3"}), "expected %v to equal %v", subscriptions["mockPubSub2"], []string{"topic2", "topic3"}) - assert.Len(t, subscriptions["mockPubSub3"], 0) + assert.Empty(t, subscriptions["mockPubSub3"]) } t.Run("subscribe to 3 topics on 2 components", subscribePredefined) @@ -1641,7 +1641,7 @@ func TestPubsubLifecycle(t *testing.T) { for _, m := range send { //nolint:gosec err = ps.Publish(context.Background(), &m) - assert.NoError(t, err) + 
require.NoError(t, err) } for i := 0; i < expect; i++ { @@ -1650,7 +1650,7 @@ func TestPubsubLifecycle(t *testing.T) { // Sleep to ensure no new messages have come in time.Sleep(10 * time.Millisecond) - assert.Len(t, messagesCh, 0) + assert.Empty(t, messagesCh) msgMux.Lock() close(messagesCh) @@ -1730,7 +1730,7 @@ func TestPubsubLifecycle(t *testing.T) { sendMessages(t, 0) - require.Len(t, messages, 0) + require.Empty(t, messages) comp2.AssertCalled(t, "unsubscribed", "topic3") comp3.AssertCalled(t, "unsubscribed", "topic4") }) diff --git a/pkg/runtime/processor/secret/secret_test.go b/pkg/runtime/processor/secret/secret_test.go index 2ef74cbbabb..0dcd466d246 100644 --- a/pkg/runtime/processor/secret/secret_test.go +++ b/pkg/runtime/processor/secret/secret_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/dapr/components-contrib/secretstores" @@ -76,7 +77,7 @@ func TestProcessResourceSecrets(t *testing.T) { ) // add Kubernetes component manually - assert.NoError(t, sec.Init(context.Background(), componentsapi.Component{ + require.NoError(t, sec.Init(context.Background(), componentsapi.Component{ ObjectMeta: metav1.ObjectMeta{ Name: compsecret.BuiltinKubernetesSecretStore, }, @@ -128,7 +129,7 @@ func TestProcessResourceSecrets(t *testing.T) { Version: "v1", }, }) - assert.NoError(t, err) + require.NoError(t, err) updated, unready := sec.ProcessResource(context.Background(), mockBinding) assert.True(t, updated) diff --git a/pkg/runtime/processor/state/state_test.go b/pkg/runtime/processor/state/state_test.go index 5ccf55af1f5..de74cf1d11c 100644 --- a/pkg/runtime/processor/state/state_test.go +++ b/pkg/runtime/processor/state/state_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -93,7 +94,7 @@ func TestInitState(t *testing.T) { err := proc.Init(context.TODO(), mockStateComponent("noerror")) // assert - assert.NoError(t, err, "expected no error") + require.NoError(t, err, "expected no error") }) t.Run("test init state store error", func(t *testing.T) { @@ -104,7 +105,7 @@ func TestInitState(t *testing.T) { err := proc.Init(context.TODO(), mockStateComponent("error")) // assert - assert.Error(t, err, "expected error") + require.Error(t, err, "expected error") assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.InitComponentFailure, "error (state.mockState/v1)", assert.AnError).Error(), "expected error strings to match") }) @@ -117,7 +118,7 @@ func TestInitState(t *testing.T) { ok := encryption.EncryptedStateStore("noencryption") // assert - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, ok) }) @@ -131,7 +132,7 @@ func TestInitState(t *testing.T) { ok := encryption.EncryptedStateStore("encryption") // assert - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, ok) }) } diff --git a/pkg/runtime/pubsub/bulkpublish_resiliency_test.go b/pkg/runtime/pubsub/bulkpublish_resiliency_test.go index 1ffcef060af..ef1a33cec23 100644 --- a/pkg/runtime/pubsub/bulkpublish_resiliency_test.go +++ b/pkg/runtime/pubsub/bulkpublish_resiliency_test.go @@ -21,6 +21,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" contribPubsub "github.com/dapr/components-contrib/pubsub" resiliencyV1alpha 
"github.com/dapr/dapr/pkg/apis/resiliency/v1alpha1" @@ -85,9 +86,9 @@ func (m *mockBulkPublisher) BulkPublish(ctx context.Context, req *contribPubsub. m.entryIDRetryTimes[entry.EntryId] = 1 } // assert the data and metadata are correct - assert.Equal(m.t, entry.Metadata, map[string]string{ + assert.Equal(m.t, map[string]string{ "key" + entry.EntryId: "value" + entry.EntryId, - }) + }, entry.Metadata) assert.Equal(m.t, entry.Event, []byte("data "+entry.EntryId)) } // fail events based on the input count @@ -189,7 +190,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { // Assert // expecting no final error, the events will pass in the second try - assert.Nil(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) @@ -221,7 +222,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { // Assert // Expect final error from the bulk publisher - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, assert.AnError, err) assert.Len(t, res.FailedEntries, 6) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) @@ -255,7 +256,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { // Assert // expecting no final error, all the events will pass in the second try - assert.Nil(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) @@ -287,7 +288,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { // Assert // expecting no final error, all the events will pass in a single try - assert.Nil(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) @@ -322,8 +323,8 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) - assert.ErrorIs(t, err, context.DeadlineExceeded) + require.Error(t, err) + require.ErrorIs(t, err, context.DeadlineExceeded) assert.Len(t, res.FailedEntries, 6) // not asserting the number of called times since it may or may not be updated(component called) in actually code. // In test code, it is not updated. 
@@ -353,7 +354,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) @@ -376,7 +377,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) @@ -410,7 +411,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 3) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) @@ -433,7 +434,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) @@ -467,8 +468,8 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.Nil(t, err) - assert.Len(t, res.FailedEntries, 0) + require.NoError(t, err) + assert.Empty(t, res.FailedEntries) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // It is 3 here because the first failure is before resiliency policy starts @@ -510,7 +511,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 3) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) @@ -537,8 +538,8 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.Nil(t, err) - assert.Len(t, res.FailedEntries, 0) + require.NoError(t, err) + assert.Empty(t, res.FailedEntries) assert.Len(t, bulkPublisher.entryIDRetryTimes, 6) t.Logf("event ID try count map %v\n\n", bulkPublisher.entryIDRetryTimes) // Increase retry count for all event IDs, bulkPublisher is called as CB is half-open @@ -582,7 +583,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // all events fail on timeout // not asserting the number of called times since it may or may not be updated(component called) in actually code. @@ -593,7 +594,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // Not aaserting the number of called times since it may or may not be updated(component called) in actually code. 
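The other recurring change across these hunks swaps direct field access on protobuf messages (req.Message().Method, envelope.DataContentType, resp.Status().Code) for the generated Get* accessors. Generated getters are nil-safe: called on a nil message they return the field's zero value instead of panicking, which is why chains such as cloudEvent.GetExtensions().GetFields() need no intermediate nil checks. A self-contained sketch of the convention; Envelope here is a hypothetical stand-in for a protoc-generated type, not one of the runtime messages:

package main

import "fmt"

// Envelope mimics the getter protoc emits for a string field.
type Envelope struct {
	DataContentType string
}

// GetDataContentType returns the zero value on a nil receiver instead of
// panicking, matching the behavior of generated protobuf getters.
func (e *Envelope) GetDataContentType() string {
	if e == nil {
		return ""
	}
	return e.DataContentType
}

func main() {
	var e *Envelope // e.g. an optional message that was never set
	fmt.Println(e.GetDataContentType() == "") // true; e.DataContentType would panic here
}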
@@ -631,7 +632,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err := ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // all events fail on timeout // not asserting the number of called times since it may or may not be updated(component called) in actually code. @@ -642,7 +643,7 @@ func TestApplyBulkPublishResiliency(t *testing.T) { res, err = ApplyBulkPublishResiliency(ctx, req, policyDef, bulkPublisher) // Assert - assert.NotNil(t, err) + require.Error(t, err) assert.Equal(t, breaker.ErrOpenState, err) assert.Len(t, res.FailedEntries, 6) // Not aaserting the number of called times since it may or may not be updated(component called) in actually code. diff --git a/pkg/runtime/pubsub/cloudevents_test.go b/pkg/runtime/pubsub/cloudevents_test.go index 06434436fc9..9047f1f67d9 100644 --- a/pkg/runtime/pubsub/cloudevents_test.go +++ b/pkg/runtime/pubsub/cloudevents_test.go @@ -19,6 +19,7 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewCloudEvent(t *testing.T) { @@ -33,7 +34,7 @@ func TestNewCloudEvent(t *testing.T) { TraceID: "d", Type: "custom-type", }, map[string]string{}) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, ce["id"]) // validates that the ID is generated assert.True(t, validUUID(ce["id"].(string))) // validates that the ID is a UUID assert.Equal(t, "a", ce["source"].(string)) @@ -54,7 +55,7 @@ func TestNewCloudEvent(t *testing.T) { TraceID: "d", Type: "", // defaults to "com.dapr.event.sent" }, map[string]string{}) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "testid", ce["id"].(string)) assert.Equal(t, "Dapr", ce["source"].(string)) assert.Equal(t, "b", ce["topic"].(string)) @@ -83,7 +84,7 @@ func TestNewCloudEvent(t *testing.T) { "cloudevent.traceparent": "overridetraceparent", "cloudevent.tracestate": "overridetracestate", }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "originalpubsub", ce["pubsubname"].(string)) assert.Equal(t, "originaltopic", ce["topic"].(string)) assert.Equal(t, "originaldata", ce["data"].(string)) @@ -113,7 +114,7 @@ func TestNewCloudEvent(t *testing.T) { TraceID: "trace1", Pubsub: "pubsub", }, map[string]string{}) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "event", ce["id"].(string)) assert.Equal(t, "world", ce["data"].(string)) assert.Equal(t, "text/plain", ce["datacontenttype"].(string)) diff --git a/pkg/runtime/pubsub/default_bulkpub_test.go b/pkg/runtime/pubsub/default_bulkpub_test.go index f0fd8db0844..331cdca207d 100644 --- a/pkg/runtime/pubsub/default_bulkpub_test.go +++ b/pkg/runtime/pubsub/default_bulkpub_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" contribPubsub "github.com/dapr/components-contrib/pubsub" daprt "github.com/dapr/dapr/pkg/testing" @@ -101,11 +102,11 @@ func TestBulkPublish_DefaultBulkPublisher(t *testing.T) { // Check if the bulk publish method returns an error. if tc.nErrors > 0 { - assert.Error(t, err) + require.Error(t, err) // Response should contain an entry for each message in the bulk request. 
- assert.Equal(t, tc.nErrors, len(res.FailedEntries)) + assert.Len(t, res.FailedEntries, tc.nErrors) } else { - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, res.FailedEntries) } diff --git a/pkg/runtime/pubsub/default_bulksub_test.go b/pkg/runtime/pubsub/default_bulksub_test.go index 7583f3a2eb3..9575e579c00 100644 --- a/pkg/runtime/pubsub/default_bulksub_test.go +++ b/pkg/runtime/pubsub/default_bulksub_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" contribPubsub "github.com/dapr/components-contrib/pubsub" ) @@ -72,7 +73,7 @@ func TestFlushMessages(t *testing.T) { } flushMessages(context.Background(), "topic", tc.messages, tc.msgCbMap, handler) - assert.Equal(t, handlerInvoked, tc.expectedHandlerInvoked) + assert.Equal(t, tc.expectedHandlerInvoked, handlerInvoked) }) } }) @@ -144,9 +145,9 @@ func TestFlushMessages(t *testing.T) { for id, err := range invokedCallbacks { if _, ok := tc.entryIdErrMap[id]; ok { - assert.NotNil(t, err) + require.Error(t, err) } else { - assert.Nil(t, err) + require.NoError(t, err) } } }) diff --git a/pkg/runtime/pubsub/outbox_test.go b/pkg/runtime/pubsub/outbox_test.go index 457cec14583..4dc5b587791 100644 --- a/pkg/runtime/pubsub/outbox_test.go +++ b/pkg/runtime/pubsub/outbox_test.go @@ -165,9 +165,9 @@ func TestAddOrUpdateOutbox(t *testing.T) { }) c := o.outboxStores["test"] - assert.Equal(t, c.outboxPubsub, "2") - assert.Equal(t, c.publishPubSub, "a") - assert.Equal(t, c.publishTopic, "1") + assert.Equal(t, "2", c.outboxPubsub) + assert.Equal(t, "a", c.publishPubSub) + assert.Equal(t, "1", c.publishTopic) }) t.Run("config default values correct", func(t *testing.T) { @@ -199,9 +199,9 @@ func TestAddOrUpdateOutbox(t *testing.T) { }) c := o.outboxStores["test"] - assert.Equal(t, c.outboxPubsub, "a") - assert.Equal(t, c.publishPubSub, "a") - assert.Equal(t, c.publishTopic, "1") + assert.Equal(t, "a", c.outboxPubsub) + assert.Equal(t, "a", c.publishPubSub) + assert.Equal(t, "1", c.publishTopic) }) } @@ -211,7 +211,7 @@ func TestPublishInternal(t *testing.T) { o.publishFn = func(ctx context.Context, pr *contribPubsub.PublishRequest) error { var cloudEvent map[string]interface{} err := json.Unmarshal(pr.Data, &cloudEvent) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", cloudEvent["data"]) assert.Equal(t, "a", pr.PubsubName) @@ -256,7 +256,7 @@ func TestPublishInternal(t *testing.T) { }, }, "testapp", "", "") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("valid operation, no datacontenttype", func(t *testing.T) { @@ -264,7 +264,7 @@ func TestPublishInternal(t *testing.T) { o.publishFn = func(ctx context.Context, pr *contribPubsub.PublishRequest) error { var cloudEvent map[string]interface{} err := json.Unmarshal(pr.Data, &cloudEvent) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", cloudEvent["data"]) assert.Equal(t, "a", pr.PubsubName) @@ -311,7 +311,7 @@ func TestPublishInternal(t *testing.T) { }, }, "testapp", "", "") - assert.NoError(t, err) + require.NoError(t, err) }) type customData struct { @@ -323,13 +323,13 @@ func TestPublishInternal(t *testing.T) { o.publishFn = func(ctx context.Context, pr *contribPubsub.PublishRequest) error { var cloudEvent map[string]interface{} err := json.Unmarshal(pr.Data, &cloudEvent) - assert.NoError(t, err) + require.NoError(t, err) data := cloudEvent["data"] j := customData{} err = json.Unmarshal([]byte(data.(string)), &j) - assert.NoError(t, err) + 
require.NoError(t, err) assert.Equal(t, "test", j.Name) assert.Equal(t, "a", pr.PubsubName) @@ -382,7 +382,7 @@ func TestPublishInternal(t *testing.T) { }, }, "testapp", "", "") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("missing state store", func(t *testing.T) { @@ -394,7 +394,7 @@ func TestPublishInternal(t *testing.T) { Value: "test", }, }, "testapp", "", "") - assert.Error(t, err) + require.Error(t, err) }) t.Run("no op when no transactions", func(t *testing.T) { @@ -432,7 +432,7 @@ func TestPublishInternal(t *testing.T) { _, err := o.PublishInternal(context.TODO(), "test", []state.TransactionalStateOperation{}, "testapp", "", "") - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("error when pubsub fails", func(t *testing.T) { @@ -474,7 +474,7 @@ func TestPublishInternal(t *testing.T) { }, }, "testapp", "", "") - assert.Error(t, err) + require.Error(t, err) }) } @@ -640,8 +640,8 @@ func TestSubscribeToInternalTopics(t *testing.T) { }, }, appID, "", "") - assert.Error(t, pErr) - assert.Len(t, trs, 0) + require.Error(t, pErr) + assert.Empty(t, trs) }) t.Run("outbox state not present", func(t *testing.T) { @@ -727,7 +727,7 @@ func TestSubscribeToInternalTopics(t *testing.T) { }() d, err := time.ParseDuration(stateScan) - assert.NoError(t, err) + require.NoError(t, err) start := time.Now() doneCh := make(chan error, 2) @@ -852,7 +852,7 @@ func TestSubscribeToInternalTopics(t *testing.T) { }() d, err := time.ParseDuration(stateScan) - assert.NoError(t, err) + require.NoError(t, err) start := time.Now() doneCh := make(chan error, 2) @@ -909,7 +909,7 @@ func (o *outboxPubsubMock) Publish(ctx context.Context, req *contribPubsub.Publi }) if o.validateNoError { - assert.NoError(o.t, err) + require.NoError(o.t, err) return } }() diff --git a/pkg/runtime/pubsub/subscriptions.go b/pkg/runtime/pubsub/subscriptions.go index d8e1f083142..9f3e936d698 100644 --- a/pkg/runtime/pubsub/subscriptions.go +++ b/pkg/runtime/pubsub/subscriptions.go @@ -95,7 +95,7 @@ func GetSubscriptionsHTTP(ctx context.Context, channel channel.AppChannel, log l subscriptionItems []SubscriptionJSON ) - switch resp.Status().Code { + switch resp.Status().GetCode() { case http.StatusOK: err = json.NewDecoder(resp.RawData()).Decode(&subscriptionItems) if err != nil { @@ -153,7 +153,7 @@ func GetSubscriptionsHTTP(ctx context.Context, channel channel.AppChannel, log l default: // Unexpected response: both GRPC and HTTP have to log the same level. 
- log.Errorf("app returned http status code %v from subscription endpoint", resp.Status().Code) + log.Errorf("app returned http status code %v from subscription endpoint", resp.Status().GetCode()) } log.Debugf("app responded with subscriptions %v", subscriptions) @@ -198,28 +198,28 @@ func GetSubscriptionsGRPC(ctx context.Context, channel runtimev1pb.AppCallbackCl } var subscriptions []Subscription - if resp == nil || len(resp.Subscriptions) == 0 { + if resp == nil || len(resp.GetSubscriptions()) == 0 { log.Debug(noSubscriptionsError) } else { - subscriptions = make([]Subscription, len(resp.Subscriptions)) - for i, s := range resp.Subscriptions { - rules, err := parseRoutingRulesGRPC(s.Routes) + subscriptions = make([]Subscription, len(resp.GetSubscriptions())) + for i, s := range resp.GetSubscriptions() { + rules, err := parseRoutingRulesGRPC(s.GetRoutes()) if err != nil { return nil, err } var bulkSubscribe *BulkSubscribe - if s.BulkSubscribe != nil { + if s.GetBulkSubscribe() != nil { bulkSubscribe = &BulkSubscribe{ - Enabled: s.BulkSubscribe.Enabled, - MaxMessagesCount: s.BulkSubscribe.MaxMessagesCount, - MaxAwaitDurationMs: s.BulkSubscribe.MaxAwaitDurationMs, + Enabled: s.GetBulkSubscribe().GetEnabled(), + MaxMessagesCount: s.GetBulkSubscribe().GetMaxMessagesCount(), + MaxAwaitDurationMs: s.GetBulkSubscribe().GetMaxAwaitDurationMs(), } } subscriptions[i] = Subscription{ - PubsubName: s.PubsubName, + PubsubName: s.GetPubsubName(), Topic: s.GetTopic(), Metadata: s.GetMetadata(), - DeadLetterTopic: s.DeadLetterTopic, + DeadLetterTopic: s.GetDeadLetterTopic(), Rules: rules, BulkSubscribe: bulkSubscribe, } @@ -399,10 +399,10 @@ func parseRoutingRulesGRPC(routes *runtimev1pb.TopicRoutes) ([]*Rule, error) { Path: "", }}, nil } - r := make([]*Rule, 0, len(routes.Rules)+1) + r := make([]*Rule, 0, len(routes.GetRules())+1) - for _, rule := range routes.Rules { - rr, err := createRoutingRule(rule.Match, rule.Path) + for _, rule := range routes.GetRules() { + rr, err := createRoutingRule(rule.GetMatch(), rule.GetPath()) if err != nil { return nil, err } @@ -412,9 +412,9 @@ func parseRoutingRulesGRPC(routes *runtimev1pb.TopicRoutes) ([]*Rule, error) { // If a default path is set, add a rule with a nil `Match`, // which is treated as `true` and always selected if // no previous rules match. 
- if routes.Default != "" { + if routes.GetDefault() != "" { r = append(r, &Rule{ - Path: routes.Default, + Path: routes.GetDefault(), }) } @@ -457,7 +457,7 @@ func DeclarativeKubernetes(ctx context.Context, client operatorv1pb.OperatorClie return subs } - for _, s := range resp.Subscriptions { + for _, s := range resp.GetSubscriptions() { // No namespace filtering here as it's been already filtered by the operator subs, err = appendSubscription(subs, s, "") if err != nil { diff --git a/pkg/runtime/pubsub/subscriptions_test.go b/pkg/runtime/pubsub/subscriptions_test.go index 07b33a6f508..a74088206ca 100644 --- a/pkg/runtime/pubsub/subscriptions_test.go +++ b/pkg/runtime/pubsub/subscriptions_test.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -159,13 +160,13 @@ func TestDeclarativeSubscriptionsV1(t *testing.T) { t.Run("load multiple subscriptions in different files", func(t *testing.T) { for i := 0; i < subscriptionCount; i++ { s := testDeclarativeSubscriptionV1() - s.Spec.Topic = fmt.Sprintf("%v", i) - s.Spec.Route = fmt.Sprintf("%v", i) - s.Spec.Pubsubname = fmt.Sprintf("%v", i) + s.Spec.Topic = strconv.Itoa(i) + s.Spec.Route = strconv.Itoa(i) + s.Spec.Pubsubname = strconv.Itoa(i) s.Spec.Metadata = map[string]string{ - "testName": fmt.Sprintf("%v", i), + "testName": strconv.Itoa(i), } - s.Scopes = []string{fmt.Sprintf("%v", i)} + s.Scopes = []string{strconv.Itoa(i)} filepath := fmt.Sprintf("%s/%v.yaml", dir, i) writeSubscriptionToDisk(s, filepath) @@ -175,13 +176,13 @@ func TestDeclarativeSubscriptionsV1(t *testing.T) { subs := DeclarativeLocal([]string{dir}, "", log) if assert.Len(t, subs, subscriptionCount) { for i := 0; i < subscriptionCount; i++ { - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Topic) - if assert.Equal(t, 1, len(subs[i].Rules)) { - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Rules[0].Path) + assert.Equal(t, strconv.Itoa(i), subs[i].Topic) + if assert.Len(t, subs[i].Rules, 1) { + assert.Equal(t, strconv.Itoa(i), subs[i].Rules[0].Path) } - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].PubsubName) - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Scopes[0]) - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Metadata["testName"]) + assert.Equal(t, strconv.Itoa(i), subs[i].PubsubName) + assert.Equal(t, strconv.Itoa(i), subs[i].Scopes[0]) + assert.Equal(t, strconv.Itoa(i), subs[i].Metadata["testName"]) } } }) @@ -193,13 +194,13 @@ func TestDeclarativeSubscriptionsV1(t *testing.T) { for i := 0; i < subscriptionCount; i++ { s := testDeclarativeSubscriptionV1() - s.Spec.Topic = fmt.Sprintf("%v", i) - s.Spec.Route = fmt.Sprintf("%v", i) - s.Spec.Pubsubname = fmt.Sprintf("%v", i) + s.Spec.Topic = strconv.Itoa(i) + s.Spec.Route = strconv.Itoa(i) + s.Spec.Pubsubname = strconv.Itoa(i) s.Spec.Metadata = map[string]string{ - "testName": fmt.Sprintf("%v", i), + "testName": strconv.Itoa(i), } - s.Scopes = []string{fmt.Sprintf("%v", i)} + s.Scopes = []string{strconv.Itoa(i)} subscriptions = append(subscriptions, s) } @@ -211,13 +212,13 @@ func TestDeclarativeSubscriptionsV1(t *testing.T) { subs := DeclarativeLocal([]string{dir}, "", log) if assert.Len(t, subs, subscriptionCount) { for i := 0; i < subscriptionCount; i++ { - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Topic) - if assert.Equal(t, 1, len(subs[i].Rules)) { - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Rules[0].Path) + assert.Equal(t, strconv.Itoa(i), subs[i].Topic) + if assert.Len(t, subs[i].Rules, 1) { + assert.Equal(t, strconv.Itoa(i), 
subs[i].Rules[0].Path) } - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].PubsubName) - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Scopes[0]) - assert.Equal(t, fmt.Sprintf("%v", i), subs[i].Metadata["testName"]) + assert.Equal(t, strconv.Itoa(i), subs[i].PubsubName) + assert.Equal(t, strconv.Itoa(i), subs[i].Scopes[0]) + assert.Equal(t, strconv.Itoa(i), subs[i].Metadata["testName"]) } } }) @@ -319,7 +320,7 @@ func TestDeclarativeSubscriptionsV1(t *testing.T) { defer os.RemoveAll(filePath) subs := DeclarativeLocal([]string{dir}, "", log) - assert.Len(t, subs, 0) + assert.Empty(t, subs) }) t.Run("no subscriptions loaded", func(t *testing.T) { @@ -331,7 +332,7 @@ func TestDeclarativeSubscriptionsV1(t *testing.T) { writeSubscriptionToDisk(s, dir) subs := DeclarativeLocal([]string{dir}, "", log) - assert.Len(t, subs, 0) + assert.Empty(t, subs) }) } @@ -365,7 +366,7 @@ func TestDeclarativeSubscriptionsV2(t *testing.T) { t.Run("load multiple subscriptions in different files", func(t *testing.T) { for i := 0; i < subscriptionCount; i++ { - iStr := fmt.Sprintf("%v", i) + iStr := strconv.Itoa(i) s := testDeclarativeSubscriptionV2() s.Spec.Topic = iStr for j := range s.Spec.Routes.Rules { @@ -386,9 +387,9 @@ func TestDeclarativeSubscriptionsV2(t *testing.T) { subs := DeclarativeLocal([]string{dir}, "", log) if assert.Len(t, subs, subscriptionCount) { for i := 0; i < subscriptionCount; i++ { - iStr := fmt.Sprintf("%v", i) + iStr := strconv.Itoa(i) assert.Equal(t, iStr, subs[i].Topic) - if assert.Equal(t, 3, len(subs[i].Rules)) { + if assert.Len(t, subs[i].Rules, 3) { assert.Equal(t, iStr, subs[i].Rules[0].Path) } assert.Equal(t, iStr, subs[i].PubsubName) @@ -401,7 +402,7 @@ func TestDeclarativeSubscriptionsV2(t *testing.T) { t.Run("load multiple subscriptions in single file", func(t *testing.T) { subscriptions := []any{} for i := 0; i < subscriptionCount; i++ { - iStr := fmt.Sprintf("%v", i) + iStr := strconv.Itoa(i) s := testDeclarativeSubscriptionV2() s.Spec.Topic = iStr for j := range s.Spec.Routes.Rules { @@ -424,9 +425,9 @@ func TestDeclarativeSubscriptionsV2(t *testing.T) { subs := DeclarativeLocal([]string{dir}, "", log) if assert.Len(t, subs, subscriptionCount) { for i := 0; i < subscriptionCount; i++ { - iStr := fmt.Sprintf("%v", i) + iStr := strconv.Itoa(i) assert.Equal(t, iStr, subs[i].Topic) - if assert.Equal(t, 3, len(subs[i].Rules)) { + if assert.Len(t, subs[i].Rules, 3) { assert.Equal(t, iStr, subs[i].Rules[0].Path) } assert.Equal(t, iStr, subs[i].PubsubName) @@ -445,7 +446,7 @@ func TestDeclarativeSubscriptionsV2(t *testing.T) { writeSubscriptionToDisk(s, dir) subs := DeclarativeLocal([]string{dir}, "", log) - assert.Len(t, subs, 0) + assert.Empty(t, subs) }) } @@ -732,7 +733,7 @@ func TestGRPCSubscriptions(t *testing.T) { // not implemented error is not retried and is returned as "zero" subscriptions require.NoError(t, err) assert.Equal(t, 1, m.callCount) - assert.Len(t, subs, 0) + assert.Empty(t, subs) }) t.Run("error from app, success after retries with resiliency", func(t *testing.T) { @@ -765,7 +766,7 @@ func TestGRPCSubscriptions(t *testing.T) { // not implemented error is not retried and is returned as "zero" subscriptions require.NoError(t, err) assert.Equal(t, 1, m.callCount) - assert.Len(t, subs, 0) + assert.Empty(t, subs) }) } @@ -817,7 +818,7 @@ func TestGetRuleMatchString(t *testing.T) { for _, v := range cases { rule, err := createRoutingRule(v.Match, v.Path) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, v.Match, rule.Match.String()) } } diff --git 
a/pkg/runtime/runtime_test.go b/pkg/runtime/runtime_test.go index bdd440e89af..d4d20263a57 100644 --- a/pkg/runtime/runtime_test.go +++ b/pkg/runtime/runtime_test.go @@ -109,7 +109,7 @@ func TestNewRuntime(t *testing.T) { }, &config.Configuration{}, &config.AccessControlList{}, resiliency.New(logger.NewLogger("test"))) // assert - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, r, "runtime must be initiated") } @@ -156,7 +156,7 @@ func TestDoProcessComponent(t *testing.T) { err := rt.processor.Init(context.Background(), lockComponent) // assert - assert.Error(t, err, "expected an error") + require.Error(t, err, "expected an error") assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.InitComponentFailure, "testlock (lock.mockLock/v1)", assert.AnError).Error(), "expected error strings to match") }) @@ -179,7 +179,7 @@ func TestDoProcessComponent(t *testing.T) { err := rt.processor.Init(context.Background(), lockComponentV3) // assert - assert.Error(t, err, "expected an error") + require.Error(t, err, "expected an error") assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.CreateComponentFailure, "testlock (lock.mockLock/v3)", fmt.Errorf("couldn't find lock store lock.mockLock/v3")).Error()) }) @@ -208,7 +208,7 @@ func TestDoProcessComponent(t *testing.T) { // act err := rt.processor.Init(context.Background(), lockComponentWithWrongStrategy) // assert - assert.Error(t, err) + require.Error(t, err) }) t.Run("lock init successfully and set right strategy", func(t *testing.T) { @@ -227,11 +227,11 @@ func TestDoProcessComponent(t *testing.T) { // act err := rt.processor.Init(context.Background(), lockComponent) // assert - assert.Nil(t, err, "unexpected error") + require.NoError(t, err, "unexpected error") // get modified key key, err := lockLoader.GetModifiedLockKey("test", "mockLock", "appid-1") - assert.Nil(t, err, "unexpected error") - assert.Equal(t, key, "lock||appid-1||test") + require.NoError(t, err, "unexpected error") + assert.Equal(t, "lock||appid-1||test", key) }) t.Run("test error on pubsub init", func(t *testing.T) { @@ -257,7 +257,7 @@ func TestDoProcessComponent(t *testing.T) { err := rt.processor.Init(context.Background(), pubsubComponent) // assert - assert.Error(t, err, "expected an error") + require.Error(t, err, "expected an error") assert.Equal(t, err.Error(), rterrors.NewInit(rterrors.InitComponentFailure, "testpubsub (pubsub.mockPubSub/v1)", assert.AnError).Error(), "expected error strings to match") }) @@ -269,7 +269,7 @@ func TestDoProcessComponent(t *testing.T) { }, }) // assert - assert.Error(t, err, "error expected") + require.Error(t, err, "error expected") }) } @@ -472,7 +472,7 @@ func TestInitNameResolution(t *testing.T) { err = rt.initNameResolution(context.Background()) // assert - assert.Error(t, err) + require.Error(t, err) }) t.Run("test init nameresolution", func(t *testing.T) { @@ -492,7 +492,7 @@ func TestInitNameResolution(t *testing.T) { err = rt.initNameResolution(context.Background()) // assert - assert.NoError(t, err, "expected no error") + require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution default in StandaloneMode", func(t *testing.T) { @@ -510,7 +510,7 @@ func TestInitNameResolution(t *testing.T) { err = rt.initNameResolution(context.Background()) // assert - assert.NoError(t, err, "expected no error") + require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution nil in StandaloneMode", func(t *testing.T) { @@ -528,7 +528,7 @@ func TestInitNameResolution(t *testing.T) { err = 
rt.initNameResolution(context.Background()) // assert - assert.NoError(t, err, "expected no error") + require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution default in KubernetesMode", func(t *testing.T) { @@ -546,7 +546,7 @@ func TestInitNameResolution(t *testing.T) { err = rt.initNameResolution(context.Background()) // assert - assert.NoError(t, err, "expected no error") + require.NoError(t, err, "expected no error") }) t.Run("test init nameresolution nil in KubernetesMode", func(t *testing.T) { @@ -564,7 +564,7 @@ func TestInitNameResolution(t *testing.T) { err = rt.initNameResolution(context.Background()) // assert - assert.NoError(t, err, "expected no error") + require.NoError(t, err, "expected no error") }) } @@ -642,12 +642,12 @@ func TestSetupTracing(t *testing.T) { expectedExporters: []sdktrace.SpanExporter{&diagUtils.StdoutExporter{}, &zipkin.Exporter{}, &otlptrace.Exporter{}}, }} - for _, tc := range testcases { + for i, tc := range testcases { t.Run(tc.name, func(t *testing.T) { rt, err := NewTestDaprRuntime(t, modes.StandaloneMode) require.NoError(t, err) defer stopRuntime(t, rt) - rt.globalConfig.Spec.TracingSpec = &tc.tracingConfig + rt.globalConfig.Spec.TracingSpec = &testcases[i].tracingConfig if tc.hostAddress != "" { rt.hostAddress = tc.hostAddress } @@ -657,7 +657,7 @@ func TestSetupTracing(t *testing.T) { if err := rt.setupTracing(context.Background(), rt.hostAddress, tpStore); tc.expectedErr != "" { assert.Contains(t, err.Error(), tc.expectedErr) } else { - assert.NoError(t, err) + require.NoError(t, err) } if len(tc.expectedExporters) > 0 { assert.True(t, tpStore.HasExporter()) @@ -768,7 +768,7 @@ func assertBuiltInSecretStore(t *testing.T, rt *DaprRuntime) { return ok }, time.Second*2, time.Millisecond*100) - assert.NoError(t, rt.runnerCloser.Close()) + require.NoError(t, rt.runnerCloser.Close()) } func NewTestDaprRuntime(t *testing.T, mode modes.DaprMode) (*DaprRuntime, error) { @@ -843,7 +843,7 @@ func NewTestDaprRuntimeConfig(t *testing.T, mode modes.DaprMode, appProtocol str func TestGracefulShutdown(t *testing.T) { r, err := NewTestDaprRuntime(t, modes.StandaloneMode) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, time.Second, r.runtimeConfig.gracefulShutdownDuration) } @@ -872,13 +872,13 @@ func TestPodName(t *testing.T) { func TestInitActors(t *testing.T) { t.Run("missing namespace on kubernetes", func(t *testing.T) { r, err := NewTestDaprRuntime(t, modes.KubernetesMode) - assert.NoError(t, err) + require.NoError(t, err) defer stopRuntime(t, r) r.namespace = "" r.runtimeConfig.mTLSEnabled = true err = r.initActors(context.TODO()) - assert.Error(t, err) + require.Error(t, err) }) t.Run("actors hosted = true", func(t *testing.T) { @@ -913,7 +913,7 @@ func TestInitActors(t *testing.T) { r.channels.Refresh() err = r.initActors(context.TODO()) - assert.NotNil(t, err) + require.Error(t, err) }) t.Run("the state stores can still be initialized normally", func(t *testing.T) { @@ -945,7 +945,7 @@ func TestInitActors(t *testing.T) { assert.False(t, ok) assert.Equal(t, "", name) err = r.initActors(context.TODO()) - assert.NotNil(t, err) + require.Error(t, err) }) } @@ -1180,7 +1180,7 @@ func TestCloseWithErrors(t *testing.T) { } func stopRuntime(t *testing.T, rt *DaprRuntime) { - assert.NoError(t, rt.runnerCloser.Close()) + require.NoError(t, rt.runnerCloser.Close()) } func TestComponentsCallback(t *testing.T) { @@ -1222,7 +1222,7 @@ func TestComponentsCallback(t *testing.T) { cancel() select { case err := <-errCh: - 
assert.NoError(t, err) + require.NoError(t, err) case <-time.After(10 * time.Second): t.Fatal("timed out waiting for runtime to stop") } @@ -1262,7 +1262,7 @@ func TestGRPCProxy(t *testing.T) { cancel() select { case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) case <-time.After(5 * time.Second): t.Fatal("timed out waiting for runtime to stop") } @@ -1331,7 +1331,7 @@ func TestShutdownWithWait(t *testing.T) { dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} - assert.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` + require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: @@ -1385,7 +1385,7 @@ spec: select { case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to stop") } @@ -1411,7 +1411,7 @@ spec: ) dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} - assert.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` + require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: @@ -1457,7 +1457,7 @@ spec: select { case err := <-errCh: - assert.Error(t, err) + require.Error(t, err) case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to stop") } @@ -1493,7 +1493,7 @@ spec: dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} - assert.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` + require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: @@ -1525,7 +1525,7 @@ spec: select { case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to stop") } @@ -1564,7 +1564,7 @@ spec: dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} - assert.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` + require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: @@ -1583,7 +1583,7 @@ spec: select { case err := <-errCh: - assert.ErrorContains(t, err, "this is an error") + require.ErrorContains(t, err, "this is an error") case <-time.After(5 * time.Second): t.Error("timed out waiting for runtime to error") } @@ -1633,7 +1633,7 @@ spec: dir := t.TempDir() rt.runtimeConfig.standalone.ResourcesPath = []string{dir} - assert.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` + require.NoError(t, os.WriteFile(filepath.Join(dir, "kubernetesMock.yaml"), []byte(` apiVersion: dapr.io/v1alpha1 kind: Component metadata: @@ -1733,15 +1733,15 @@ func TestGetComponentsCapabilitiesMap(t *testing.T) { require.NoError(t, rt.processor.Init(context.Background(), cSecretStore)) capabilities := rt.getComponentsCapabilitesMap() - assert.Equal(t, 5, len(capabilities), + assert.Len(t, capabilities, 5, "All 5 registered components have are present in capabilities (stateStore pubSub input output secretStore)") - assert.Equal(t, 2, len(capabilities["mockPubSub"]), + assert.Len(t, capabilities["mockPubSub"], 2, "mockPubSub has 2 features because we mocked it so") - assert.Equal(t, 1, len(capabilities["testInputBinding"]), + assert.Len(t, capabilities["testInputBinding"], 1, "Input 
bindings always have INPUT_BINDING added to their capabilities") - assert.Equal(t, 1, len(capabilities["testOutputBinding"]), + assert.Len(t, capabilities["testOutputBinding"], 1, "Output bindings always have OUTPUT_BINDING added to their capabilities") - assert.Equal(t, 1, len(capabilities[mockSecretStoreName]), + assert.Len(t, capabilities[mockSecretStoreName], 1, "mockSecretStore has a single feature and it should be present") } @@ -1793,7 +1793,7 @@ func (s *pingStreamService) PingStream(stream pb.TestService_PingStreamServer) e } else if err != nil { return err } - pong := &pb.PingResponse{Value: ping.Value, Counter: counter} + pong := &pb.PingResponse{Value: ping.GetValue(), Counter: counter} if err := stream.Send(pong); err != nil { return err } @@ -1804,7 +1804,7 @@ func (s *pingStreamService) PingStream(stream pb.TestService_PingStreamServer) e func matchDaprRequestMethod(method string) any { return mock.MatchedBy(func(req *invokev1.InvokeMethodRequest) bool { - if req == nil || req.Message() == nil || req.Message().Method != method { + if req == nil || req.Message() == nil || req.Message().GetMethod() != method { return false } return true @@ -1843,15 +1843,15 @@ func TestGracefulShutdownBindings(t *testing.T) { cout.Spec.Type = "bindings.testOutputBinding" require.NoError(t, rt.processor.Init(context.Background(), cin)) require.NoError(t, rt.processor.Init(context.Background(), cout)) - assert.Equal(t, len(rt.compStore.ListInputBindings()), 1) - assert.Equal(t, len(rt.compStore.ListOutputBindings()), 1) + assert.Len(t, rt.compStore.ListInputBindings(), 1) + assert.Len(t, rt.compStore.ListOutputBindings(), 1) cancel() select { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "input bindings shutdown timed out") case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) } } @@ -1920,7 +1920,7 @@ func TestGracefulShutdownPubSub(t *testing.T) { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "pubsub shutdown timed out") case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) } } @@ -1982,9 +1982,9 @@ func TestGracefulShutdownActors(t *testing.T) { err = rt.processor.Init(context.Background(), mockStateComponent) // assert - assert.NoError(t, err, "expected no error") + require.NoError(t, err, "expected no error") - assert.NoError(t, rt.initActors(context.TODO())) + require.NoError(t, rt.initActors(context.TODO())) cancel() @@ -1992,15 +1992,15 @@ func TestGracefulShutdownActors(t *testing.T) { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "actors shutdown timed out") case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) } var activeActCount int32 runtimeStatus := rt.actor.GetRuntimeStatus(context.Background()) for _, v := range runtimeStatus.GetActiveActors() { - activeActCount += v.Count + activeActCount += v.GetCount() } - assert.Equal(t, activeActCount, int32(0)) + assert.Equal(t, int32(0), activeActCount) } func initMockStateStoreForRuntime(rt *DaprRuntime, encryptKey string, e error) *daprt.MockStateStore { @@ -2062,7 +2062,7 @@ func TestTraceShutdown(t *testing.T) { case <-time.After(rt.runtimeConfig.gracefulShutdownDuration + 2*time.Second): assert.Fail(t, "tracing shutdown timed out") case err := <-errCh: - assert.NoError(t, err) + require.NoError(t, err) } assert.Nil(t, rt.tracerProvider) diff --git a/pkg/runtime/wfengine/activity.go b/pkg/runtime/wfengine/activity.go index 
ecb39ed1626..13c77972f45 100644 --- a/pkg/runtime/wfengine/activity.go +++ b/pkg/runtime/wfengine/activity.go @@ -155,7 +155,7 @@ func (a *activityActor) executeActivity(ctx context.Context, actorID string, nam workflowID := actorID[0:endIndex] wi := &backend.ActivityWorkItem{ - SequenceNumber: int64(taskEvent.EventId), + SequenceNumber: int64(taskEvent.GetEventId()), InstanceID: api.InstanceID(workflowID), NewEvent: taskEvent, Properties: make(map[string]interface{}), diff --git a/pkg/runtime/wfengine/backend.go b/pkg/runtime/wfengine/backend.go index e151192187f..61df6de9c8d 100644 --- a/pkg/runtime/wfengine/backend.go +++ b/pkg/runtime/wfengine/backend.go @@ -187,7 +187,7 @@ func (be *actorBackend) GetOrchestrationMetadata(ctx context.Context, id api.Ins // AbandonActivityWorkItem implements backend.Backend. It gets called by durabletask-go when there is // an unexpected failure in the workflow activity execution pipeline. func (*actorBackend) AbandonActivityWorkItem(ctx context.Context, wi *backend.ActivityWorkItem) error { - wfLogger.Warnf("%s: aborting activity execution (::%d)", wi.InstanceID, wi.NewEvent.EventId) + wfLogger.Warnf("%s: aborting activity execution (::%d)", wi.InstanceID, wi.NewEvent.GetEventId()) // Sending false signals the waiting activity actor to abort the activity execution. if channel, ok := wi.Properties[CallbackChannelProperty]; ok { diff --git a/pkg/runtime/wfengine/component.go b/pkg/runtime/wfengine/component.go index a03832b0c24..8ab9c7f6b36 100644 --- a/pkg/runtime/wfengine/component.go +++ b/pkg/runtime/wfengine/component.go @@ -199,8 +199,8 @@ func (c *workflowEngineComponent) Get(ctx context.Context, req *workflows.GetReq // Status-specific fields if metadata.FailureDetails != nil { - res.Workflow.Properties["dapr.workflow.failure.error_type"] = metadata.FailureDetails.ErrorType - res.Workflow.Properties["dapr.workflow.failure.error_message"] = metadata.FailureDetails.ErrorMessage + res.Workflow.Properties["dapr.workflow.failure.error_type"] = metadata.FailureDetails.GetErrorType() + res.Workflow.Properties["dapr.workflow.failure.error_message"] = metadata.FailureDetails.GetErrorMessage() } else if metadata.IsComplete() { res.Workflow.Properties["dapr.workflow.output"] = metadata.SerializedOutput } diff --git a/pkg/runtime/wfengine/wfengine_test.go b/pkg/runtime/wfengine/wfengine_test.go index 630a592c5c7..a6c7b40f60d 100644 --- a/pkg/runtime/wfengine/wfengine_test.go +++ b/pkg/runtime/wfengine/wfengine_test.go @@ -24,6 +24,7 @@ import ( "context" "fmt" "sort" + "strconv" "strings" "sync/atomic" "testing" @@ -64,7 +65,7 @@ func TestStartWorkflowEngine(t *testing.T) { grpcServer := grpc.NewServer() engine.RegisterGrpcServer(grpcServer) err := engine.Start(ctx) - assert.NoError(t, err) + require.NoError(t, err) } // GetTestOptions returns an array of functions for configuring the workflow engine. 
Each @@ -201,13 +202,11 @@ func TestActivityChainingWorkflow(t *testing.T) { for _, opt := range GetTestOptions() { t.Run(opt(engine), func(t *testing.T) { id, err := client.ScheduleNewOrchestration(ctx, "ActivityChain") - if assert.NoError(t, err) { - metadata, err := client.WaitForOrchestrationCompletion(ctx, id) - if assert.NoError(t, err) { - assert.True(t, metadata.IsComplete()) - assert.Equal(t, `10`, metadata.SerializedOutput) - } - } + require.NoError(t, err) + metadata, err := client.WaitForOrchestrationCompletion(ctx, id) + require.NoError(t, err) + assert.True(t, metadata.IsComplete()) + assert.Equal(t, `10`, metadata.SerializedOutput) }) } } @@ -239,7 +238,7 @@ func TestConcurrentActivityExecution(t *testing.T) { } // Sleep for 1 second to ensure that the test passes only if all activities execute in parallel. time.Sleep(1 * time.Second) - return fmt.Sprintf("%d", input), nil + return strconv.Itoa(input), nil }) ctx := context.Background() @@ -247,16 +246,14 @@ func TestConcurrentActivityExecution(t *testing.T) { for _, opt := range GetTestOptions() { t.Run(opt(engine), func(t *testing.T) { id, err := client.ScheduleNewOrchestration(ctx, "ActivityFanOut") - if assert.NoError(t, err) { - metadata, err := client.WaitForOrchestrationCompletion(ctx, id) - if assert.NoError(t, err) { - assert.True(t, metadata.IsComplete()) - assert.Equal(t, `["9","8","7","6","5","4","3","2","1","0"]`, metadata.SerializedOutput) - - // Because all the activities run in parallel, they should complete very quickly - assert.Less(t, metadata.LastUpdatedAt.Sub(metadata.CreatedAt), 3*time.Second) - } - } + require.NoError(t, err) + metadata, err := client.WaitForOrchestrationCompletion(ctx, id) + require.NoError(t, err) + assert.True(t, metadata.IsComplete()) + assert.Equal(t, `["9","8","7","6","5","4","3","2","1","0"]`, metadata.SerializedOutput) + + // Because all the activities run in parallel, they should complete very quickly + assert.Less(t, metadata.LastUpdatedAt.Sub(metadata.CreatedAt), 3*time.Second) }) } } @@ -323,21 +320,19 @@ func TestRecreateCompletedWorkflow(t *testing.T) { // First workflow var metadata *api.OrchestrationMetadata id, err := client.ScheduleNewOrchestration(ctx, "EchoWorkflow", api.WithInput("echo!")) - if assert.NoError(t, err) { - if metadata, err = client.WaitForOrchestrationCompletion(ctx, id); assert.NoError(t, err) { - assert.True(t, metadata.IsComplete()) - assert.Equal(t, `"echo!"`, metadata.SerializedOutput) - } - } + require.NoError(t, err) + metadata, err = client.WaitForOrchestrationCompletion(ctx, id) + require.NoError(t, err) + assert.True(t, metadata.IsComplete()) + assert.Equal(t, `"echo!"`, metadata.SerializedOutput) // Second workflow, using the same ID as the first but a different input _, err = client.ScheduleNewOrchestration(ctx, "EchoWorkflow", api.WithInstanceID(id), api.WithInput(42)) - if assert.NoError(t, err) { - if metadata, err = client.WaitForOrchestrationCompletion(ctx, id); assert.NoError(t, err) { - assert.True(t, metadata.IsComplete()) - assert.Equal(t, `42`, metadata.SerializedOutput) - } - } + require.NoError(t, err) + metadata, err = client.WaitForOrchestrationCompletion(ctx, id) + require.NoError(t, err) + assert.True(t, metadata.IsComplete()) + assert.Equal(t, `42`, metadata.SerializedOutput) }) } } @@ -345,7 +340,7 @@ func TestRecreateCompletedWorkflow(t *testing.T) { func TestInternalActorsSetupForWF(t *testing.T) { ctx := context.Background() _, engine := startEngine(ctx, t, task.NewTaskRegistry()) - assert.Equal(t, 2, 
len(engine.GetInternalActorsMap())) + assert.Len(t, engine.GetInternalActorsMap(), 2) assert.Contains(t, engine.GetInternalActorsMap(), workflowActorType) assert.Contains(t, engine.GetInternalActorsMap(), activityActorType) } @@ -366,11 +361,10 @@ func TestRecreateRunningWorkflowFails(t *testing.T) { // Start the first workflow, which will not complete var metadata *api.OrchestrationMetadata id, err := client.ScheduleNewOrchestration(ctx, "SleepyWorkflow") - if assert.NoError(t, err) { - if metadata, err = client.WaitForOrchestrationStart(ctx, id); assert.NoError(t, err) { - assert.False(t, metadata.IsComplete()) - } - } + require.NoError(t, err) + metadata, err = client.WaitForOrchestrationStart(ctx, id) + require.NoError(t, err) + assert.False(t, metadata.IsComplete()) // Attempting to start a second workflow with the same ID should fail _, err = client.ScheduleNewOrchestration(ctx, "SleepyWorkflow", api.WithInstanceID(id)) @@ -420,7 +414,7 @@ func TestRetryWorkflowOnTimeout(t *testing.T) { metadata, err := client.WaitForOrchestrationCompletion(timeoutCtx, id) require.NoError(t, err) assert.True(t, metadata.IsComplete()) - assert.Equal(t, fmt.Sprintf("%d", expectedCallCount), metadata.SerializedOutput) + assert.Equal(t, strconv.Itoa(expectedCallCount), metadata.SerializedOutput) }) } } @@ -470,7 +464,7 @@ func TestRetryActivityOnTimeout(t *testing.T) { metadata, err := client.WaitForOrchestrationCompletion(timeoutCtx, id) require.NoError(t, err) assert.True(t, metadata.IsComplete()) - assert.Equal(t, fmt.Sprintf("%d", expectedCallCount), metadata.SerializedOutput) + assert.Equal(t, strconv.Itoa(expectedCallCount), metadata.SerializedOutput) }) } } @@ -495,19 +489,17 @@ func TestConcurrentTimerExecution(t *testing.T) { for _, opt := range GetTestOptions() { t.Run(opt(engine), func(t *testing.T) { id, err := client.ScheduleNewOrchestration(ctx, "TimerFanOut") - if assert.NoError(t, err) { - // Add a 5 second timeout so that the test doesn't take forever if something isn't working - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() + require.NoError(t, err) + // Add a 5 second timeout so that the test doesn't take forever if something isn't working + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() - metadata, err := client.WaitForOrchestrationCompletion(timeoutCtx, id) - if assert.NoError(t, err) { - assert.True(t, metadata.IsComplete()) + metadata, err := client.WaitForOrchestrationCompletion(timeoutCtx, id) + require.NoError(t, err) + assert.True(t, metadata.IsComplete()) - // Because all the timers run in parallel, they should complete very quickly - assert.Less(t, metadata.LastUpdatedAt.Sub(metadata.CreatedAt), 3*time.Second) - } - } + // Because all the timers run in parallel, they should complete very quickly + assert.Less(t, metadata.LastUpdatedAt.Sub(metadata.CreatedAt), 3*time.Second) }) } } @@ -529,17 +521,15 @@ func TestRaiseEvent(t *testing.T) { for _, opt := range GetTestOptions() { t.Run(opt(engine), func(t *testing.T) { id, err := client.ScheduleNewOrchestration(ctx, "WorkflowForRaiseEvent") - if assert.NoError(t, err) { - metadata, err := client.WaitForOrchestrationStart(ctx, id) - if assert.NoError(t, err) { - assert.Equal(t, id, metadata.InstanceID) - client.RaiseEvent(ctx, id, "NameOfEventBeingRaised", api.WithEventPayload("NameOfInput")) - metadata, _ = client.WaitForOrchestrationCompletion(ctx, id) - assert.True(t, metadata.IsComplete()) - assert.Equal(t, `"Hello, NameOfInput!"`, metadata.SerializedOutput) - 
assert.Nil(t, metadata.FailureDetails) - } - } + require.NoError(t, err) + metadata, err := client.WaitForOrchestrationStart(ctx, id) + require.NoError(t, err) + assert.Equal(t, id, metadata.InstanceID) + client.RaiseEvent(ctx, id, "NameOfEventBeingRaised", api.WithEventPayload("NameOfInput")) + metadata, _ = client.WaitForOrchestrationCompletion(ctx, id) + assert.True(t, metadata.IsComplete()) + assert.Equal(t, `"Hello, NameOfInput!"`, metadata.SerializedOutput) + assert.Nil(t, metadata.FailureDetails) }) } } @@ -625,7 +615,7 @@ func TestPurge(t *testing.T) { assert.Greater(t, keyCounter.Load(), int64(10)) err = client.PurgeOrchestrationState(ctx, id) - assert.NoError(t, err) + require.NoError(t, err) // Check that no key from the statestore containing the actor id is still present in the statestore keysPostPurge := []string{} @@ -691,7 +681,7 @@ func TestPurgeContinueAsNew(t *testing.T) { assert.Greater(t, keyCounter.Load(), int64(2)) err = client.PurgeOrchestrationState(ctx, id) - assert.NoError(t, err) + require.NoError(t, err) // Check that no key from the statestore containing the actor id is still present in the statestore keysPostPurge := []string{} @@ -723,19 +713,17 @@ func TestPauseResumeWorkflow(t *testing.T) { for _, opt := range GetTestOptions() { t.Run(opt(engine), func(t *testing.T) { id, err := client.ScheduleNewOrchestration(ctx, "PauseWorkflow") - if assert.NoError(t, err) { - metadata, err := client.WaitForOrchestrationStart(ctx, id) - if assert.NoError(t, err) { - assert.Equal(t, id, metadata.InstanceID) - client.SuspendOrchestration(ctx, id, "PauseWFReasonTest") - client.RaiseEvent(ctx, id, "WaitForThisEvent") - assert.True(t, metadata.IsRunning()) - client.ResumeOrchestration(ctx, id, "ResumeWFReasonTest") - metadata, _ = client.WaitForOrchestrationCompletion(ctx, id) - assert.True(t, metadata.IsComplete()) - assert.Nil(t, metadata.FailureDetails) - } - } + require.NoError(t, err) + metadata, err := client.WaitForOrchestrationStart(ctx, id) + require.NoError(t, err) + assert.Equal(t, id, metadata.InstanceID) + client.SuspendOrchestration(ctx, id, "PauseWFReasonTest") + client.RaiseEvent(ctx, id, "WaitForThisEvent") + assert.True(t, metadata.IsRunning()) + client.ResumeOrchestration(ctx, id, "ResumeWFReasonTest") + metadata, _ = client.WaitForOrchestrationCompletion(ctx, id) + assert.True(t, metadata.IsComplete()) + assert.Nil(t, metadata.FailureDetails) }) } } diff --git a/pkg/runtime/wfengine/workflow.go b/pkg/runtime/wfengine/workflow.go index 4c60575bb64..beb5a55fa8c 100644 --- a/pkg/runtime/wfengine/workflow.go +++ b/pkg/runtime/wfengine/workflow.go @@ -177,10 +177,10 @@ func (wf *workflowActor) createWorkflowInstance(ctx context.Context, actorID str if es := startEvent.GetExecutionStarted(); es == nil { return errors.New("invalid execution start event") } else { - if es.ParentInstance == nil { - wfLogger.Debugf("Workflow actor '%s': creating workflow '%s' with instanceId '%s'", actorID, es.Name, es.OrchestrationInstance.InstanceId) + if es.GetParentInstance() == nil { + wfLogger.Debugf("Workflow actor '%s': creating workflow '%s' with instanceId '%s'", actorID, es.GetName(), es.GetOrchestrationInstance().GetInstanceId()) } else { - wfLogger.Debugf("Workflow actor '%s': creating child workflow '%s' with instanceId '%s' parentWorkflow '%s' parentWorkflowId '%s'", es.Name, es.OrchestrationInstance.InstanceId, es.ParentInstance.Name, es.ParentInstance.OrchestrationInstance.InstanceId) + wfLogger.Debugf("Workflow actor '%s': creating child workflow '%s' with 
instanceId '%s' parentWorkflow '%s' parentWorkflowId '%s'", es.GetName(), es.GetOrchestrationInstance().GetInstanceId(), es.GetParentInstance().GetName(), es.GetParentInstance().GetOrchestrationInstance().GetInstanceId()) } } @@ -341,9 +341,9 @@ func (wf *workflowActor) runWorkflow(ctx context.Context, actorID string, remind for _, e := range state.Inbox { var taskID int32 if ts := e.GetTaskCompleted(); ts != nil { - taskID = ts.TaskScheduledId + taskID = ts.GetTaskScheduledId() } else if tf := e.GetTaskFailed(); tf != nil { - taskID = tf.TaskScheduledId + taskID = tf.GetTaskScheduledId() } else { continue } @@ -423,11 +423,11 @@ func (wf *workflowActor) runWorkflow(ctx context.Context, actorID string, remind if err != nil { return fmt.Errorf("failed to marshal pending timer data: %w", err) } - delay := time.Until(tf.FireAt.AsTime()) + delay := time.Until(tf.GetFireAt().AsTime()) if delay < 0 { delay = 0 } - reminderPrefix := fmt.Sprintf("timer-%d", tf.TimerId) + reminderPrefix := fmt.Sprintf("timer-%d", tf.GetTimerId()) data := NewDurableTimer(timerBytes, state.Generation) wfLogger.Debugf("Workflow actor '%s': creating reminder '%s' for the durable timer", actorID, reminderPrefix) if _, err := wf.createReliableReminder(ctx, actorID, reminderPrefix, data, delay); err != nil { @@ -467,7 +467,7 @@ func (wf *workflowActor) runWorkflow(ctx context.Context, actorID string, remind if err != nil { return err } - targetActorID := getActivityActorID(actorID, e.EventId, state.Generation) + targetActorID := getActivityActorID(actorID, e.GetEventId(), state.Generation) wfLogger.Debugf("Workflow actor '%s': invoking execute method on activity actor '%s'", actorID, targetActorID) req := invokev1. @@ -482,10 +482,10 @@ func (wf *workflowActor) runWorkflow(ctx context.Context, actorID string, remind resp, err := wf.actors.Call(ctx, req) if err != nil { if errors.Is(err, ErrDuplicateInvocation) { - wfLogger.Warnf("Workflow actor '%s': activity invocation '%s::%d' was flagged as a duplicate and will be skipped", actorID, ts.Name, e.EventId) + wfLogger.Warnf("Workflow actor '%s': activity invocation '%s::%d' was flagged as a duplicate and will be skipped", actorID, ts.GetName(), e.GetEventId()) continue } - return newRecoverableError(fmt.Errorf("failed to invoke activity actor '%s' to execute '%s': %w", targetActorID, ts.Name, err)) + return newRecoverableError(fmt.Errorf("failed to invoke activity actor '%s' to execute '%s': %w", targetActorID, ts.GetName(), err)) } resp.Close() } @@ -606,9 +606,9 @@ func (wf *workflowActor) removeCompletedStateData(ctx context.Context, state *wo for _, e := range state.Inbox { var taskID int32 if ts := e.GetTaskCompleted(); ts != nil { - taskID = ts.TaskScheduledId + taskID = ts.GetTaskScheduledId() } else if tf := e.GetTaskFailed(); tf != nil { - taskID = tf.TaskScheduledId + taskID = tf.GetTaskScheduledId() } else { continue } diff --git a/pkg/runtime/wfengine/workflowstate_test.go b/pkg/runtime/wfengine/workflowstate_test.go index 90464edb10d..012b43302b5 100644 --- a/pkg/runtime/wfengine/workflowstate_test.go +++ b/pkg/runtime/wfengine/workflowstate_test.go @@ -43,7 +43,7 @@ const ( func TestNoWorkflowState(t *testing.T) { actors := getActorRuntime() state, err := wfengine.LoadWorkflowState(context.Background(), actors, "wf1", wfengine.NewActorsBackendConfig(testAppID)) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, state) } @@ -143,11 +143,11 @@ func TestLoadSavedState(t *testing.T) { assert.Equal(t, uint64(1), wfstate.Generation) require.Len(t, 
wfstate.History, 10) for i, e := range wfstate.History { - assert.Equal(t, int32(i), e.EventId) + assert.Equal(t, int32(i), e.GetEventId()) } require.Len(t, wfstate.Inbox, 5) for i, e := range wfstate.Inbox { - assert.Equal(t, int32(i), e.EventId) + assert.Equal(t, int32(i), e.GetEventId()) } } @@ -156,9 +156,7 @@ func TestResetLoadedState(t *testing.T) { runtimeState := backend.NewOrchestrationRuntimeState(api.InstanceID("wf1"), nil) for i := 0; i < 10; i++ { - if err := runtimeState.AddEvent(&backend.HistoryEvent{}); !assert.NoError(t, err) { - return - } + require.NoError(t, runtimeState.AddEvent(&backend.HistoryEvent{})) } wfstate.ApplyRuntimeStateChanges(runtimeState) @@ -184,7 +182,7 @@ func TestResetLoadedState(t *testing.T) { req, err = wfstate.GetSaveRequest("wf1") require.NoError(t, err) - assert.Equal(t, 17, len(req.Operations)) // history x10 + inbox x5 + metadata + customStatus + assert.Len(t, req.Operations, 17) // history x10 + inbox x5 + metadata + customStatus upsertCount, deleteCount := countOperations(t, req) assert.Equal(t, 2, upsertCount) // metadata + customStatus assert.Equal(t, 15, deleteCount) // all history and inbox records are deleted diff --git a/pkg/security/legacy/legacy_test.go b/pkg/security/legacy/legacy_test.go index d116701fd5d..4e70bb1e1a0 100644 --- a/pkg/security/legacy/legacy_test.go +++ b/pkg/security/legacy/legacy_test.go @@ -186,7 +186,7 @@ func Test_NewServer(t *testing.T) { clientsvid, err := x509svid.ParseRaw(append(clientCertDER, issCert.Raw...), clientPKDER) require.NoError(t, err) - assert.NoError(t, dial(t, tlsconfig.MTLSClientConfig(clientsvid, rootCA, tlsconfig.AuthorizeAny()))) + require.NoError(t, dial(t, tlsconfig.MTLSClientConfig(clientsvid, rootCA, tlsconfig.AuthorizeAny()))) }) t.Run("if client uses a SVID but signed by different root, expect error", func(t *testing.T) { @@ -206,7 +206,7 @@ func Test_NewServer(t *testing.T) { require.NoError(t, err) err = dial(t, tlsconfig.MTLSClientConfig(clientsvid, diffRootCA, tlsconfig.AuthorizeAny())) - assert.ErrorContains(t, err, "x509: ECDSA verification failure") + require.ErrorContains(t, err, "x509: ECDSA verification failure") }) t.Run("if client uses DNS and is `cluster.local`, expect no error", func(t *testing.T) { @@ -220,7 +220,7 @@ func Test_NewServer(t *testing.T) { }, issCert, &clientPK.PublicKey, issKey) require.NoError(t, err) - assert.NoError(t, dial(t, &tls.Config{ + require.NoError(t, dial(t, &tls.Config{ RootCAs: rootPool, InsecureSkipVerify: true, //nolint: gosec // this is a test @@ -242,7 +242,7 @@ func Test_NewServer(t *testing.T) { }, issCert, &clientPK.PublicKey, issKey) require.NoError(t, err) - assert.NoError(t, dial(t, &tls.Config{ + require.NoError(t, dial(t, &tls.Config{ RootCAs: rootPool, InsecureSkipVerify: true, //nolint: gosec // this is a test Certificates: []tls.Certificate{ @@ -269,7 +269,7 @@ func Test_NewServer(t *testing.T) { {Certificate: [][]byte{clientCertDER, issCert.Raw}, PrivateKey: clientPK}, }, }) - assert.ErrorContains(t, err, "remote error: tls: bad certificate") + require.ErrorContains(t, err, "remote error: tls: bad certificate") }) t.Run("if client uses DNS but is from different root, expect error", func(t *testing.T) { @@ -290,7 +290,7 @@ func Test_NewServer(t *testing.T) { {Certificate: [][]byte{clientCertDER, diffCert.Raw}, PrivateKey: clientPK}, }, }) - assert.ErrorContains(t, err, "remote error: tls: bad certificate") + require.ErrorContains(t, err, "remote error: tls: bad certificate") }) } @@ -384,7 +384,7 @@ func Test_NewDialClient(t 
*testing.T) { serversvid, err := x509svid.ParseRaw(append(serverCertDER, issCert.Raw...), serverPKDER) require.NoError(t, err) - assert.NoError(t, serve(t, tlsconfig.MTLSServerConfig(serversvid, rootCA, tlsconfig.AuthorizeAny()))) + require.NoError(t, serve(t, tlsconfig.MTLSServerConfig(serversvid, rootCA, tlsconfig.AuthorizeAny()))) }) t.Run("if server uses a SVID but signed by different root, expect error", func(t *testing.T) { @@ -404,7 +404,7 @@ func Test_NewDialClient(t *testing.T) { require.NoError(t, err) err = serve(t, tlsconfig.MTLSServerConfig(serversvid, diffRootCA, tlsconfig.AuthorizeAny())) - assert.ErrorContains(t, err, "x509: ECDSA verification failure") + require.ErrorContains(t, err, "x509: ECDSA verification failure") }) t.Run("if server uses DNS and is `cluster.local`, expect no error", func(t *testing.T) { @@ -418,7 +418,7 @@ func Test_NewDialClient(t *testing.T) { }, issCert, &serverPK.PublicKey, issKey) require.NoError(t, err) - assert.NoError(t, serve(t, &tls.Config{ + require.NoError(t, serve(t, &tls.Config{ RootCAs: rootPool, InsecureSkipVerify: true, //nolint: gosec // this is a test Certificates: []tls.Certificate{ @@ -438,7 +438,7 @@ func Test_NewDialClient(t *testing.T) { }, issCert, &serverPK.PublicKey, issKey) require.NoError(t, err) - assert.NoError(t, serve(t, &tls.Config{ + require.NoError(t, serve(t, &tls.Config{ RootCAs: rootPool, InsecureSkipVerify: true, //nolint: gosec // this is a test Certificates: []tls.Certificate{ @@ -465,7 +465,7 @@ func Test_NewDialClient(t *testing.T) { {Certificate: [][]byte{serverCertDER, issCert.Raw}, PrivateKey: serverPK}, }, }) - assert.ErrorContains(t, err, "x509svid: could not get leaf SPIFFE ID: certificate contains no URI SAN\nx509: certificate is valid for no-cluster.foo.local, local.cluster, not cluster.local") + require.ErrorContains(t, err, "x509svid: could not get leaf SPIFFE ID: certificate contains no URI SAN\nx509: certificate is valid for no-cluster.foo.local, local.cluster, not cluster.local") }) t.Run("if server uses DNS but is from different root, expect error", func(t *testing.T) { @@ -486,6 +486,6 @@ func Test_NewDialClient(t *testing.T) { {Certificate: [][]byte{serverCertDER, diffCert.Raw}, PrivateKey: serverPK}, }, }) - assert.ErrorContains(t, err, "x509svid: could not get leaf SPIFFE ID: certificate contains no URI SAN\nx509: certificate is valid for no-cluster.foo.local, local.cluster, not cluster.local") + require.ErrorContains(t, err, "x509svid: could not get leaf SPIFFE ID: certificate contains no URI SAN\nx509: certificate is valid for no-cluster.foo.local, local.cluster, not cluster.local") }) } diff --git a/pkg/security/security_test.go b/pkg/security/security_test.go index a8315313a77..c223ea7870f 100644 --- a/pkg/security/security_test.go +++ b/pkg/security/security_test.go @@ -180,21 +180,21 @@ func TestCurrentNamespace(t *testing.T) { } }) ns, err := CurrentNamespaceOrError() - assert.Error(t, err) + require.Error(t, err) assert.Empty(t, ns) }) t.Run("error if namespace is set but empty", func(t *testing.T) { t.Setenv("NAMESPACE", "") ns, err := CurrentNamespaceOrError() - assert.Error(t, err) + require.Error(t, err) assert.Empty(t, ns) }) t.Run("returns namespace if set", func(t *testing.T) { t.Setenv("NAMESPACE", "foo") ns, err := CurrentNamespaceOrError() - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "foo", ns) }) } diff --git a/pkg/security/x509source.go b/pkg/security/x509source.go index 6955b5fc79e..c578f7e3e96 100644 --- a/pkg/security/x509source.go +++ 
b/pkg/security/x509source.go @@ -431,11 +431,13 @@ func atomicWrite(clock clock.Clock, dir string, data map[string][]byte) error { } } + const filesuffix = "-new" + // Symlinks are typically not available on Windows containers, so we use a // copy and rename instead. if runtime.GOOS == "windows" { - os.RemoveAll(dir + "-new") - if err := os.MkdirAll(dir+"-new", 0o700); err != nil { + os.RemoveAll(dir + filesuffix) + if err := os.MkdirAll(dir+filesuffix, 0o700); err != nil { return err } @@ -448,20 +450,20 @@ func atomicWrite(clock clock.Clock, dir string, data map[string][]byte) error { if err != nil { return err } - if err := os.WriteFile(filepath.Join(dir+"-new", entry.Name()), b, 0o600); err != nil { + if err := os.WriteFile(filepath.Join(dir+filesuffix, entry.Name()), b, 0o600); err != nil { return err } } // You can't rename a directory over an existing directory. os.RemoveAll(dir) - if err := os.Rename(dir+"-new", dir); err != nil { - return fmt.Errorf("failed to rename %s to %s: %w", dir+"-new", dir, err) + if err := os.Rename(dir+filesuffix, dir); err != nil { + return fmt.Errorf("failed to rename %s to %s: %w", dir+filesuffix, dir, err) } } else { - if err := os.Symlink(newDir, dir+"-new"); err != nil { + if err := os.Symlink(newDir, dir+filesuffix); err != nil { return err } - if err := os.Rename(dir+"-new", dir); err != nil { + if err := os.Rename(dir+filesuffix, dir); err != nil { return err } } diff --git a/pkg/sentry/server/ca/ca_test.go b/pkg/sentry/server/ca/ca_test.go index 35bdb8be929..f0644c91201 100644 --- a/pkg/sentry/server/ca/ca_test.go +++ b/pkg/sentry/server/ca/ca_test.go @@ -75,9 +75,9 @@ func TestNew(t *testing.T) { issuerKeyPK, err := pem.DecodePEMPrivateKey(issuerKey) require.NoError(t, err) - assert.NoError(t, issuerCertX509[0].CheckSignatureFrom(rootCertX509[0])) + require.NoError(t, issuerCertX509[0].CheckSignatureFrom(rootCertX509[0])) ok, err := pem.PublicKeysEqual(issuerCertX509[0].PublicKey, issuerKeyPK.Public()) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, ok) }) @@ -182,8 +182,8 @@ func TestSignIdentity(t *testing.T) { assert.ElementsMatch(t, clientCert[0].DNSNames, []string{"my-app-id.my-test-namespace.svc.cluster.local", "example.com"}) require.Len(t, clientCert[0].URIs, 1) - assert.Equal(t, clientCert[0].URIs[0].String(), "spiffe://example.test.dapr.io/ns/my-test-namespace/my-app-id") + assert.Equal(t, "spiffe://example.test.dapr.io/ns/my-test-namespace/my-app-id", clientCert[0].URIs[0].String()) - assert.NoError(t, clientCert[0].CheckSignatureFrom(int2Crt)) + require.NoError(t, clientCert[0].CheckSignatureFrom(int2Crt)) }) } diff --git a/pkg/sentry/server/ca/selfhosted_test.go b/pkg/sentry/server/ca/selfhosted_test.go index bbb355447af..dc72b10a3d4 100644 --- a/pkg/sentry/server/ca/selfhosted_test.go +++ b/pkg/sentry/server/ca/selfhosted_test.go @@ -50,7 +50,7 @@ func TestSelhosted_store(t *testing.T) { }, } - assert.NoError(t, s.store(context.Background(), Bundle{ + require.NoError(t, s.store(context.Background(), Bundle{ TrustAnchors: []byte("root"), IssChainPEM: []byte("issuer"), IssKeyPEM: []byte("key"), @@ -61,27 +61,27 @@ func TestSelhosted_store(t *testing.T) { require.FileExists(t, keyFile) info, err := os.Stat(rootFile) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, writePerm, info.Mode().Perm()) info, err = os.Stat(issuerFile) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, writePerm, info.Mode().Perm()) info, err = os.Stat(keyFile) - assert.NoError(t, err) + require.NoError(t, 
err) assert.Equal(t, writePerm, info.Mode().Perm()) b, err := os.ReadFile(rootFile) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "root", string(b)) b, err = os.ReadFile(issuerFile) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "issuer", string(b)) b, err = os.ReadFile(keyFile) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "key", string(b)) }) } diff --git a/pkg/sentry/server/server.go b/pkg/sentry/server/server.go index 47459847ba2..76f1aa8fda5 100644 --- a/pkg/sentry/server/server.go +++ b/pkg/sentry/server/server.go @@ -108,45 +108,45 @@ func (s *server) SignCertificate(ctx context.Context, req *sentryv1pb.SignCertif func (s *server) signCertificate(ctx context.Context, req *sentryv1pb.SignCertificateRequest) (*sentryv1pb.SignCertificateResponse, error) { validator := s.defaultValidator - if req.TokenValidator != sentryv1pb.SignCertificateRequest_UNKNOWN && req.TokenValidator.String() != "" { - validator = req.TokenValidator + if req.GetTokenValidator() != sentryv1pb.SignCertificateRequest_UNKNOWN && req.GetTokenValidator().String() != "" { + validator = req.GetTokenValidator() } if validator == sentryv1pb.SignCertificateRequest_UNKNOWN { - log.Debugf("Validator '%s' is not known for %s/%s", validator.String(), req.Namespace, req.Id) + log.Debugf("Validator '%s' is not known for %s/%s", validator.String(), req.GetNamespace(), req.GetId()) return nil, status.Error(codes.InvalidArgument, "a validator name must be specified in this environment") } if _, ok := s.vals[validator]; !ok { - log.Debugf("Validator '%s' is not enabled for %s/%s", validator.String(), req.Namespace, req.Id) + log.Debugf("Validator '%s' is not enabled for %s/%s", validator.String(), req.GetNamespace(), req.GetId()) return nil, status.Error(codes.InvalidArgument, "the requested validator is not enabled") } - log.Debugf("Processing SignCertificate request for %s/%s (validator: %s)", req.Namespace, req.Id, validator.String()) + log.Debugf("Processing SignCertificate request for %s/%s (validator: %s)", req.GetNamespace(), req.GetId(), validator.String()) trustDomain, overrideDuration, err := s.vals[validator].Validate(ctx, req) if err != nil { - log.Debugf("Failed to validate request for %s/%s: %s", req.Namespace, req.Id, err) + log.Debugf("Failed to validate request for %s/%s: %s", req.GetNamespace(), req.GetId(), err) return nil, status.Error(codes.PermissionDenied, err.Error()) } der, _ := pem.Decode(req.GetCertificateSigningRequest()) if der == nil { - log.Debugf("Invalid CSR: PEM block is nil for %s/%s", req.Namespace, req.Id) + log.Debugf("Invalid CSR: PEM block is nil for %s/%s", req.GetNamespace(), req.GetId()) return nil, status.Error(codes.InvalidArgument, "invalid certificate signing request") } // TODO: @joshvanl: Before v1.12, daprd was sending CSRs with the PEM block type "CERTIFICATE" // After 1.14, allow only "CERTIFICATE REQUEST" if der.Type != "CERTIFICATE REQUEST" && der.Type != "CERTIFICATE" { - log.Debugf("Invalid CSR: PEM block type is invalid for %s/%s: %s", req.Namespace, req.Id, der.Type) + log.Debugf("Invalid CSR: PEM block type is invalid for %s/%s: %s", req.GetNamespace(), req.GetId(), der.Type) return nil, status.Error(codes.InvalidArgument, "invalid certificate signing request") } csr, err := x509.ParseCertificateRequest(der.Bytes) if err != nil { - log.Debugf("Failed to parse CSR for %s/%s: %v", req.Namespace, req.Id, err) + log.Debugf("Failed to parse CSR for %s/%s: %v", req.GetNamespace(), req.GetId(), err) return nil, 
status.Errorf(codes.InvalidArgument, "failed to parse certificate signing request: %v", err) } if csr.CheckSignature() != nil { - log.Debugf("Invalid CSR: invalid signature for %s/%s", req.Namespace, req.Id) + log.Debugf("Invalid CSR: invalid signature for %s/%s", req.GetNamespace(), req.GetId()) return nil, status.Error(codes.InvalidArgument, "invalid signature") } @@ -157,24 +157,24 @@ func (s *server) signCertificate(ctx context.Context, req *sentryv1pb.SignCertif // compatibility. Remove after v1.14. var dns []string switch { - case req.Namespace == security.CurrentNamespace() && req.Id == "dapr-injector": - dns = []string{fmt.Sprintf("dapr-sidecar-injector.%s.svc", req.Namespace)} - case req.Namespace == security.CurrentNamespace() && req.Id == "dapr-operator": + case req.GetNamespace() == security.CurrentNamespace() && req.GetId() == "dapr-injector": + dns = []string{fmt.Sprintf("dapr-sidecar-injector.%s.svc", req.GetNamespace())} + case req.GetNamespace() == security.CurrentNamespace() && req.GetId() == "dapr-operator": // TODO: @joshvanl: before v1.12, daprd was matching on the operator server // having `cluster.local` as a DNS SAN name. Remove after v1.13. - dns = []string{"cluster.local", fmt.Sprintf("dapr-webhook.%s.svc", req.Namespace)} - case req.Namespace == security.CurrentNamespace() && req.Id == "dapr-placement": + dns = []string{"cluster.local", fmt.Sprintf("dapr-webhook.%s.svc", req.GetNamespace())} + case req.GetNamespace() == security.CurrentNamespace() && req.GetId() == "dapr-placement": dns = []string{"cluster.local"} default: - dns = []string{fmt.Sprintf("%s.%s.svc.cluster.local", req.Id, req.Namespace)} + dns = []string{fmt.Sprintf("%s.%s.svc.cluster.local", req.GetId(), req.GetNamespace())} } chain, err := s.ca.SignIdentity(ctx, &ca.SignRequest{ PublicKey: csr.PublicKey, SignatureAlgorithm: csr.SignatureAlgorithm, TrustDomain: trustDomain.String(), - Namespace: req.Namespace, - AppID: req.Id, + Namespace: req.GetNamespace(), + AppID: req.GetId(), DNS: dns, }, overrideDuration) if err != nil { @@ -188,7 +188,7 @@ func (s *server) signCertificate(ctx context.Context, req *sentryv1pb.SignCertif return nil, status.Error(codes.Internal, "failed to encode certificate chain") } - log.Debugf("Successfully signed certificate for %s/%s", req.Namespace, req.Id) + log.Debugf("Successfully signed certificate for %s/%s", req.GetNamespace(), req.GetId()) return &sentryv1pb.SignCertificateResponse{ WorkloadCertificate: chainPEM, diff --git a/pkg/sentry/server/server_test.go b/pkg/sentry/server/server_test.go index 80548cde40d..282d8dc5f40 100644 --- a/pkg/sentry/server/server_test.go +++ b/pkg/sentry/server/server_test.go @@ -320,7 +320,7 @@ func TestRun(t *testing.T) { go func() { defer close(serverClosed) - assert.NoError(t, Start(ctx, opts)) + require.NoError(t, Start(ctx, opts)) }() require.Eventually(t, func() bool { @@ -335,7 +335,7 @@ func TestRun(t *testing.T) { conn, err := grpc.DialContext(ctx, fmt.Sprintf(":%d", port), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) t.Cleanup(func() { - assert.NoError(t, conn.Close()) + require.NoError(t, conn.Close()) }) client := sentryv1pb.NewCAClient(conn) @@ -346,9 +346,9 @@ func TestRun(t *testing.T) { require.Equalf(t, test.expResp == nil, resp == nil, "expected response to be nil: %v", resp) if test.expResp != nil { - assert.Equal(t, test.expResp.TrustChainCertificates, resp.TrustChainCertificates) - assert.Equal(t, test.expResp.ValidUntil, resp.ValidUntil) - assert.Equal(t, 
test.expResp.WorkloadCertificate, resp.WorkloadCertificate) + assert.Equal(t, test.expResp.GetTrustChainCertificates(), resp.GetTrustChainCertificates()) + assert.Equal(t, test.expResp.GetValidUntil(), resp.GetValidUntil()) + assert.Equal(t, test.expResp.GetWorkloadCertificate(), resp.GetWorkloadCertificate()) } }) } diff --git a/pkg/sentry/server/validator/internal/common.go b/pkg/sentry/server/validator/internal/common.go index 435eaa14c57..92773b72af1 100644 --- a/pkg/sentry/server/validator/internal/common.go +++ b/pkg/sentry/server/validator/internal/common.go @@ -27,10 +27,10 @@ import ( // Validate validates the common rules for all requests. func Validate(_ context.Context, req *sentryv1pb.SignCertificateRequest) (spiffeid.TrustDomain, bool, error) { err := errors.Join( - validation.ValidateSelfHostedAppID(req.Id), - appIDLessOrEqualTo64Characters(req.Id), - csrIsRequired(req.CertificateSigningRequest), - namespaceIsRequired(req.Namespace), + validation.ValidateSelfHostedAppID(req.GetId()), + appIDLessOrEqualTo64Characters(req.GetId()), + csrIsRequired(req.GetCertificateSigningRequest()), + namespaceIsRequired(req.GetNamespace()), ) if err != nil { return spiffeid.TrustDomain{}, false, fmt.Errorf("invalid request: %w", err) diff --git a/pkg/sentry/server/validator/jwks/jwks.go b/pkg/sentry/server/validator/jwks/jwks.go index 24510822311..10b331eb75f 100644 --- a/pkg/sentry/server/validator/jwks/jwks.go +++ b/pkg/sentry/server/validator/jwks/jwks.go @@ -83,7 +83,7 @@ func (j *jwks) Start(ctx context.Context) error { } func (j *jwks) Validate(ctx context.Context, req *sentryv1pb.SignCertificateRequest) (td spiffeid.TrustDomain, overrideDuration bool, err error) { - if req.Token == "" { + if req.GetToken() == "" { return td, false, errors.New("the request does not contain a token") } @@ -95,13 +95,13 @@ func (j *jwks) Validate(ctx context.Context, req *sentryv1pb.SignCertificateRequ } // Construct the expected value for the subject, which is the SPIFFE ID of the requestor - sub, err := spiffeid.FromSegments(td, "ns", req.Namespace, req.Id) + sub, err := spiffeid.FromSegments(td, "ns", req.GetNamespace(), req.GetId()) if err != nil { return td, false, fmt.Errorf("failed to construct SPIFFE ID for requestor: %w", err) } // Validate the authorization token - _, err = jwt.Parse([]byte(req.Token), + _, err = jwt.Parse([]byte(req.GetToken()), jwt.WithKeySet(j.cache.KeySet(), jws.WithInferAlgorithmFromKey(true)), jwt.WithAcceptableSkew(5*time.Minute), jwt.WithContext(ctx), diff --git a/pkg/sentry/server/validator/kubernetes/kubernetes.go b/pkg/sentry/server/validator/kubernetes/kubernetes.go index c2e5e337fb7..4f539df5f04 100644 --- a/pkg/sentry/server/validator/kubernetes/kubernetes.go +++ b/pkg/sentry/server/validator/kubernetes/kubernetes.go @@ -164,22 +164,22 @@ func (k *kubernetes) Validate(ctx context.Context, req *sentryv1pb.SignCertifica injectorRequesting = pod.Namespace == k.controlPlaneNS } - if saNamespace != req.Namespace { + if saNamespace != req.GetNamespace() { if injectorRequesting { overrideDuration = true } else { - return spiffeid.TrustDomain{}, false, fmt.Errorf("namespace mismatch; received namespace: %s", req.Namespace) + return spiffeid.TrustDomain{}, false, fmt.Errorf("namespace mismatch; received namespace: %s", req.GetNamespace()) } } if pod.Spec.ServiceAccountName != prts[3] { - log.Errorf("Service account on pod %s/%s does not match token", req.Namespace, claims.Pod.Name) + log.Errorf("Service account on pod %s/%s does not match token", req.GetNamespace(), 
claims.Pod.Name) return spiffeid.TrustDomain{}, false, errors.New("pod service account mismatch") } expID, isControlPlane, err := k.expectedID(&pod) if err != nil { - log.Errorf("Failed to get expected ID for pod %s/%s: %s", req.Namespace, claims.Pod.Name, err) + log.Errorf("Failed to get expected ID for pod %s/%s: %s", req.GetNamespace(), claims.Pod.Name, err) return spiffeid.TrustDomain{}, false, err } @@ -187,7 +187,7 @@ func (k *kubernetes) Validate(ctx context.Context, req *sentryv1pb.SignCertifica // for the ID containing their namespace and service account (ns:sa). This // is wrong- dapr identities are based on daprd namespace + _app ID_. // Remove this allowance in v1.13. - if pod.Namespace+":"+pod.Spec.ServiceAccountName == req.Id { + if pod.Namespace+":"+pod.Spec.ServiceAccountName == req.GetId() { req.Id = expID } @@ -201,11 +201,11 @@ func (k *kubernetes) Validate(ctx context.Context, req *sentryv1pb.SignCertifica // TODO: @joshvanl: Remove is v1.13 when injector no longer needs to request // daprd identities. - if expID != req.Id { + if expID != req.GetId() { if injectorRequesting { overrideDuration = true } else { - return spiffeid.TrustDomain{}, false, fmt.Errorf("app-id mismatch. expected: %s, received: %s", expID, req.Id) + return spiffeid.TrustDomain{}, false, fmt.Errorf("app-id mismatch. expected: %s, received: %s", expID, req.GetId()) } } @@ -220,7 +220,7 @@ func (k *kubernetes) Validate(ctx context.Context, req *sentryv1pb.SignCertifica } var config configv1alpha1.Configuration - err = k.client.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: configName}, &config) + err = k.client.Get(ctx, types.NamespacedName{Namespace: req.GetNamespace(), Name: configName}, &config) if err != nil { log.Errorf("Failed to get configuration %q: %v", configName, err) return spiffeid.TrustDomain{}, false, errors.New("failed to get configuration") diff --git a/pkg/testing/directmessaging_mock.go b/pkg/testing/directmessaging_mock.go index c88452f3bba..991dc0a99b0 100644 --- a/pkg/testing/directmessaging_mock.go +++ b/pkg/testing/directmessaging_mock.go @@ -77,7 +77,7 @@ func (_m *FailingDirectMessaging) Invoke(ctx context.Context, targetAppID string if err != nil { return &invokev1.InvokeMethodResponse{}, err } - err = _m.Failure.PerformFailure(string(r.Message.Data.Value)) + err = _m.Failure.PerformFailure(string(r.GetMessage().GetData().GetValue())) if err != nil { return &invokev1.InvokeMethodResponse{}, err } @@ -91,10 +91,10 @@ func (_m *FailingDirectMessaging) Invoke(ctx context.Context, targetAppID string for k, v := range md { headers[k] = v.GetValues() } - contentType := r.Message.GetContentType() + contentType := r.GetMessage().GetContentType() resp := invokev1. NewInvokeMethodResponse(int32(statusCode), http.StatusText(statusCode), nil). - WithRawDataBytes(r.Message.Data.Value). + WithRawDataBytes(r.GetMessage().GetData().GetValue()). WithHTTPHeaders(headers). 
WithContentType(contentType) return resp, nil diff --git a/pkg/testing/grpc/server.go b/pkg/testing/grpc/server.go index f3f2a13fb45..30e7bde9f09 100644 --- a/pkg/testing/grpc/server.go +++ b/pkg/testing/grpc/server.go @@ -20,11 +20,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/test/bufconn" + "github.com/stretchr/testify/require" + runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" "github.com/dapr/kit/logger" ) @@ -99,7 +100,7 @@ func TestServerWithDialer[TServer any](logger logger.Logger, registersvc func(*g func StartTestAppCallbackGRPCServer(t *testing.T, port int, mockServer runtimev1pb.AppCallbackServer) *grpc.Server { lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - assert.NoError(t, err) + require.NoError(t, err) grpcServer := grpc.NewServer() go func() { runtimev1pb.RegisterAppCallbackServer(grpcServer, mockServer) diff --git a/pkg/validation/validation_test.go b/pkg/validation/validation_test.go index 9631f6927f6..c00baaa693b 100644 --- a/pkg/validation/validation_test.go +++ b/pkg/validation/validation_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestValidateKubernetesAppID(t *testing.T) { @@ -27,32 +28,32 @@ func TestValidateKubernetesAppID(t *testing.T) { id += "a" } err := ValidateKubernetesAppID(id) - assert.Error(t, err) + require.Error(t, err) }) t.Run("invalid length if suffix -dapr is appended", func(t *testing.T) { // service name id+"-dapr" exceeds 63 characters (59 + 5 = 64) id := strings.Repeat("a", 59) err := ValidateKubernetesAppID(id) - assert.Error(t, err) + require.Error(t, err) }) t.Run("valid id", func(t *testing.T) { id := "my-app-id" err := ValidateKubernetesAppID(id) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("invalid char: .", func(t *testing.T) { id := "my-app-id.app" err := ValidateKubernetesAppID(id) - assert.Error(t, err) + require.Error(t, err) }) t.Run("invalid chars space", func(t *testing.T) { id := "my-app-id app" err := ValidateKubernetesAppID(id) - assert.Error(t, err) + require.Error(t, err) }) t.Run("invalid empty", func(t *testing.T) { @@ -66,19 +67,19 @@ func TestValidateSelfHostedAppID(t *testing.T) { t.Run("valid id", func(t *testing.T) { id := "my-app-id" err := ValidateSelfHostedAppID(id) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("contains a dot", func(t *testing.T) { id := "my-app-id.app" err := ValidateSelfHostedAppID(id) - assert.Error(t, err) + require.Error(t, err) }) t.Run("contains multiple dots", func(t *testing.T) { id := "foo.bar.baz" err := ValidateSelfHostedAppID(id) - assert.Error(t, err) + require.Error(t, err) }) t.Run("invalid empty", func(t *testing.T) { diff --git a/tests/apps/binding_input_grpc/app.go b/tests/apps/binding_input_grpc/app.go index 1f4f34adfde..792a7434fe1 100644 --- a/tests/apps/binding_input_grpc/app.go +++ b/tests/apps/binding_input_grpc/app.go @@ -118,9 +118,9 @@ func main() { //nolint:forbidigo func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { - fmt.Printf("Got invoked method %s and data: %s\n", in.Method, string(in.GetData().Value)) + fmt.Printf("Got invoked method %s and data: %s\n", in.GetMethod(), string(in.GetData().GetValue())) - switch in.Method { + switch in.GetMethod() { case "GetReceivedTopics": return s.GetReceivedTopics(ctx, in) } @@ -158,10 +158,10 @@ func (s *server) 
ListTopicSubscriptions(ctx context.Context, in *emptypb.Empty) // This method is fired whenever a message has been published to a topic that has been subscribed. Dapr sends published messages in a CloudEvents 1.0 envelope. func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventRequest) (*runtimev1pb.TopicEventResponse, error) { - log.Printf("Message arrived - Topic: %s, Message: %s\n", in.Topic, string(in.Data)) + log.Printf("Message arrived - Topic: %s, Message: %s\n", in.GetTopic(), string(in.GetData())) var message string - err := json.Unmarshal(in.Data, &message) + err := json.Unmarshal(in.GetData(), &message) log.Printf("Got message: %s", message) if err != nil { log.Printf("error parsing test-topic input binding payload: %s", err) @@ -192,6 +192,6 @@ func (s *server) ListInputBindings(ctx context.Context, in *emptypb.Empty) (*run // //nolint:forbidigo func (s *server) OnBindingEvent(ctx context.Context, in *runtimev1pb.BindingEventRequest) (*runtimev1pb.BindingEventResponse, error) { - fmt.Printf("Invoked from binding: %s - %s\n", in.Name, string(in.Data)) + fmt.Printf("Invoked from binding: %s - %s\n", in.GetName(), string(in.GetData())) return &runtimev1pb.BindingEventResponse{}, nil } diff --git a/tests/apps/binding_output/app.go b/tests/apps/binding_output/app.go index b630bf94bbb..64983c714fa 100644 --- a/tests/apps/binding_output/app.go +++ b/tests/apps/binding_output/app.go @@ -168,7 +168,7 @@ func getReceivedTopicsGRPC(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) return } - w.Write(resp.Data.Value) + w.Write(resp.GetData().GetValue()) } // appRouter initializes restful api router diff --git a/tests/apps/configurationapp/app.go b/tests/apps/configurationapp/app.go index d0564c42932..bbffd426f32 100644 --- a/tests/apps/configurationapp/app.go +++ b/tests/apps/configurationapp/app.go @@ -416,8 +416,8 @@ func subscribeHandlerGRPC(client runtimev1pb.Dapr_SubscribeConfigurationClient) configurationItems := make(map[string]*Item) for key, item := range rsp.GetItems() { configurationItems[key] = &Item{ - Value: item.Value, - Version: item.Version, + Value: item.GetValue(), + Version: item.GetVersion(), } } receivedItemsInBytes, _ := json.Marshal(configurationItems) diff --git a/tests/apps/healthapp/app.go b/tests/apps/healthapp/app.go index e2085a67a02..c20c35ff34e 100644 --- a/tests/apps/healthapp/app.go +++ b/tests/apps/healthapp/app.go @@ -412,8 +412,8 @@ func (s *grpcServer) HealthCheck(ctx context.Context, _ *emptypb.Empty) (*runtim } func (s *grpcServer) OnInvoke(_ context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { - if in.Method == "foo" { - log.Println("Received method invocation: " + in.Method) + if in.GetMethod() == "foo" { + log.Println("Received method invocation: " + in.GetMethod()) return &commonv1pb.InvokeResponse{ Data: &anypb.Any{ Value: []byte("🤗"), @@ -421,7 +421,7 @@ func (s *grpcServer) OnInvoke(_ context.Context, in *commonv1pb.InvokeRequest) ( }, nil } - return nil, errors.New("unexpected method invocation: " + in.Method) + return nil, errors.New("unexpected method invocation: " + in.GetMethod()) } func (s *grpcServer) ListTopicSubscriptions(_ context.Context, in *emptypb.Empty) (*runtimev1pb.ListTopicSubscriptionsResponse, error) { @@ -436,13 +436,13 @@ func (s *grpcServer) ListTopicSubscriptions(_ context.Context, in *emptypb.Empty } func (s *grpcServer) OnTopicEvent(_ context.Context, in *runtimev1pb.TopicEventRequest) (*runtimev1pb.TopicEventResponse, error) { 
- if in.Topic == "mytopic" { - log.Println("Received topic event: " + in.Topic) + if in.GetTopic() == "mytopic" { + log.Println("Received topic event: " + in.GetTopic()) lastTopicMessage.Record() return &runtimev1pb.TopicEventResponse{}, nil } - return nil, errors.New("unexpected topic event: " + in.Topic) + return nil, errors.New("unexpected topic event: " + in.GetTopic()) } func (s *grpcServer) ListInputBindings(_ context.Context, in *emptypb.Empty) (*runtimev1pb.ListInputBindingsResponse, error) { @@ -452,13 +452,13 @@ func (s *grpcServer) ListInputBindings(_ context.Context, in *emptypb.Empty) (*r } func (s *grpcServer) OnBindingEvent(_ context.Context, in *runtimev1pb.BindingEventRequest) (*runtimev1pb.BindingEventResponse, error) { - if in.Name == "schedule" { - log.Println("Received binding event: " + in.Name) + if in.GetName() == "schedule" { + log.Println("Received binding event: " + in.GetName()) lastInputBinding.Record() return &runtimev1pb.BindingEventResponse{}, nil } - return nil, errors.New("unexpected binding event: " + in.Name) + return nil, errors.New("unexpected binding event: " + in.GetName()) } type healthCheck struct { diff --git a/tests/apps/perf/configuration/app.go b/tests/apps/perf/configuration/app.go index 622aa9550c1..507bf664ccd 100644 --- a/tests/apps/perf/configuration/app.go +++ b/tests/apps/perf/configuration/app.go @@ -416,7 +416,7 @@ func unsubscribeGRPC(subscriptionID string) (string, error) { if err != nil { return "", fmt.Errorf("error unsubscribing config updates: %w", err) } - if !resp.Ok { + if !resp.GetOk() { return "", fmt.Errorf("error unsubscribing config updates: %s", resp.GetMessage()) } return resp.GetMessage(), nil diff --git a/tests/apps/pubsub-publisher/app.go b/tests/apps/pubsub-publisher/app.go index ba754c6413c..9fcdec5e4ec 100644 --- a/tests/apps/pubsub-publisher/app.go +++ b/tests/apps/pubsub-publisher/app.go @@ -268,8 +268,8 @@ func performBulkPublish(w http.ResponseWriter, r *http.Request) { if err != nil { log.Printf("(%s) BulkPublish failed with error=%v, StatusCode=%d", reqID, err, status) log.Printf("(%s) BulkPublish failed with bulkRes errorCode=%v", reqID, bulkRes) - for _, stat := range bulkRes.FailedEntries { - log.Printf("Failed event with entry ID (%s) and error %s", stat.EntryId, stat.Error) + for _, stat := range bulkRes.GetFailedEntries() { + log.Printf("Failed event with entry ID (%s) and error %s", stat.GetEntryId(), stat.GetError()) } w.WriteHeader(status) @@ -316,7 +316,7 @@ func performBulkPublishGRPC(reqID string, pubsubToPublish, topic string, entries Metadata: reqMeta, Entries: entries, } - log.Printf("Pubsub to publish to is %s", req.PubsubName) + log.Printf("Pubsub to publish to is %s", req.GetPubsubName()) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) res, err := grpcClient.BulkPublishEventAlpha1(ctx, req) cancel() @@ -585,7 +585,7 @@ func callSubscriberMethodGRPC(reqID, appName, method string) ([]byte, error) { return nil, err } - return resp.Data.Value, nil + return resp.GetData().GetValue(), nil } func callSubscriberMethodHTTP(reqID, appName, method string) ([]byte, error) { diff --git a/tests/apps/pubsub-subscriber-routing_grpc/app.go b/tests/apps/pubsub-subscriber-routing_grpc/app.go index f8e1ab381d4..706eb814bd6 100644 --- a/tests/apps/pubsub-subscriber-routing_grpc/app.go +++ b/tests/apps/pubsub-subscriber-routing_grpc/app.go @@ -121,20 +121,20 @@ func initializeSets() { // The payload carries a Method to identify the method, a set of metadata properties and an optional 
payload. func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { reqID := "s-" + uuid.New().String() - if in.HttpExtension != nil && in.HttpExtension.Querystring != "" { - qs, err := url.ParseQuery(in.HttpExtension.Querystring) + if in.GetHttpExtension() != nil && in.GetHttpExtension().GetQuerystring() != "" { + qs, err := url.ParseQuery(in.GetHttpExtension().GetQuerystring()) if err == nil && qs.Has("reqid") { reqID = qs.Get("reqid") } } - log.Printf("(%s) Got invoked method %s", reqID, in.Method) + log.Printf("(%s) Got invoked method %s", reqID, in.GetMethod()) lock.Lock() defer lock.Unlock() respBody := &anypb.Any{} - switch in.Method { + switch in.GetMethod() { case "getMessages": respBody.Value = s.getMessages(reqID) case "initialize": @@ -192,10 +192,10 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq defer lock.Unlock() reqID := uuid.New().String() - log.Printf("(%s) Message arrived - Topic: %s, Message: %s, Path: %s", reqID, in.Topic, string(in.Data), in.Path) + log.Printf("(%s) Message arrived - Topic: %s, Message: %s, Path: %s", reqID, in.GetTopic(), string(in.GetData()), in.GetPath()) var set *sets.Set[string] - switch in.Path { + switch in.GetPath() { case pathA: set = &routedMessagesA case pathB: @@ -209,14 +209,14 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq case pathF: set = &routedMessagesF default: - log.Printf("(%s) Responding with DROP. in.Path not found", reqID) + log.Printf("(%s) Responding with DROP. in.GetPath() not found", reqID) // Return success with DROP status to drop message. return &runtimev1pb.TopicEventResponse{ Status: runtimev1pb.TopicEventResponse_DROP, //nolint:nosnakecase }, nil } - msg := string(in.Data) + msg := string(in.GetData()) set.Insert(msg) @@ -235,6 +235,6 @@ func (s *server) ListInputBindings(ctx context.Context, in *emptypb.Empty) (*run // This method gets invoked every time a new event is fired from a registered binding. The message carries the binding name, a payload and optional metadata. func (s *server) OnBindingEvent(ctx context.Context, in *runtimev1pb.BindingEventRequest) (*runtimev1pb.BindingEventResponse, error) { - log.Printf("Invoked from binding: %s", in.Name) + log.Printf("Invoked from binding: %s", in.GetName()) return &runtimev1pb.BindingEventResponse{}, nil } diff --git a/tests/apps/pubsub-subscriber_grpc/app.go b/tests/apps/pubsub-subscriber_grpc/app.go index d2a5822a9bd..bf66100f53b 100644 --- a/tests/apps/pubsub-subscriber_grpc/app.go +++ b/tests/apps/pubsub-subscriber_grpc/app.go @@ -154,20 +154,20 @@ func initializeSets() { // The payload carries a Method to identify the method, a set of metadata properties and an optional payload. 
func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { reqID := "s-" + uuid.New().String() - if in.HttpExtension != nil && in.HttpExtension.Querystring != "" { - qs, err := url.ParseQuery(in.HttpExtension.Querystring) + if in.GetHttpExtension() != nil && in.GetHttpExtension().GetQuerystring() != "" { + qs, err := url.ParseQuery(in.GetHttpExtension().GetQuerystring()) if err == nil && qs.Has("reqid") { reqID = qs.Get("reqid") } } - log.Printf("(%s) Got invoked method %s", reqID, in.Method) + log.Printf("(%s) Got invoked method %s", reqID, in.GetMethod()) lock.Lock() defer lock.Unlock() respBody := &anypb.Any{} - switch in.Method { + switch in.GetMethod() { case "getMessages": respBody.Value = s.getMessages(reqID) case "initialize": @@ -327,7 +327,7 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq defer lock.Unlock() reqID := uuid.New().String() - log.Printf("(%s) Message arrived - Topic: %s, Message: %s", reqID, in.Topic, string(in.Data)) + log.Printf("(%s) Message arrived - Topic: %s, Message: %s", reqID, in.GetTopic(), string(in.GetData())) if respondWithRetry { log.Printf("(%s) Responding with RETRY", reqID) @@ -346,20 +346,20 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq }, nil } - if in.Data == nil { - log.Printf("(%s) Responding with DROP. in.Data is nil", reqID) + if in.GetData() == nil { + log.Printf("(%s) Responding with DROP. in.GetData() is nil", reqID) // Return success with DROP status to drop message return &runtimev1pb.TopicEventResponse{ Status: runtimev1pb.TopicEventResponse_DROP, //nolint:nosnakecase }, nil } - log.Printf("(%s) data %s and the content type (%s)", reqID, in.Data, in.DataContentType) + log.Printf("(%s) data %s and the content type (%s)", reqID, in.GetData(), in.GetDataContentType()) var msg string var err error - if !strings.Contains(in.Topic, "bulk") { + if !strings.Contains(in.GetTopic(), "bulk") { // This is the old flow where always the content type is application/json // and data is always json serialized - err = json.Unmarshal(in.Data, &msg) + err = json.Unmarshal(in.GetData(), &msg) if err != nil { log.Printf("(%s) Responding with DROP. Error while unmarshaling JSON data: %v", reqID, err) // Return success with DROP status to drop message @@ -367,7 +367,7 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq Status: runtimev1pb.TopicEventResponse_DROP, //nolint:nosnakecase }, err } - if strings.HasPrefix(in.Topic, pubsubRaw) { + if strings.HasPrefix(in.GetTopic(), pubsubRaw) { var actualMsg string err = json.Unmarshal([]byte(msg), &actualMsg) if err != nil { @@ -376,11 +376,11 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq msg = actualMsg } } - } else if strings.Contains(in.Topic, "bulk") { + } else if strings.Contains(in.GetTopic(), "bulk") { // In bulk publish data and data content type match is important and // enforced/expected - if in.DataContentType == "application/json" || in.DataContentType == "application/cloudevents+json" { - err = json.Unmarshal(in.Data, &msg) + if in.GetDataContentType() == "application/json" || in.GetDataContentType() == "application/cloudevents+json" { + err = json.Unmarshal(in.GetData(), &msg) if err != nil { log.Printf("(%s) Responding with DROP. 
Error while unmarshaling JSON data: %v", reqID, err) // Return success with DROP status to drop message @@ -388,42 +388,42 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq Status: runtimev1pb.TopicEventResponse_DROP, //nolint:nosnakecase }, err } - } else if strings.HasPrefix(in.DataContentType, "text/") { - msg = (string)(in.Data) - } else if strings.Contains(in.Topic, "raw") { + } else if strings.HasPrefix(in.GetDataContentType(), "text/") { + msg = (string)(in.GetData()) + } else if strings.Contains(in.GetTopic(), "raw") { // All raw payload topics are assumed to have "raw" in the name // this is for the bulk case // This is simply for E2E only .... // we are assuming raw payload is also a string here .... In general msg should be []byte only and compared as []byte // raw payload for bulk is set from a string so this scenario holds true - msg = string(in.Data) + msg = string(in.GetData()) } } - log.Printf("(%s) Received message: %s - %s", reqID, in.Topic, msg) + log.Printf("(%s) Received message: %s - %s", reqID, in.GetTopic(), msg) - if strings.HasPrefix(in.Topic, pubsubA) && !receivedMessagesA.Has(msg) { + if strings.HasPrefix(in.GetTopic(), pubsubA) && !receivedMessagesA.Has(msg) { receivedMessagesA.Insert(msg) - } else if strings.HasPrefix(in.Topic, pubsubB) && !receivedMessagesB.Has(msg) { + } else if strings.HasPrefix(in.GetTopic(), pubsubB) && !receivedMessagesB.Has(msg) { receivedMessagesB.Insert(msg) - } else if strings.HasPrefix(in.Topic, pubsubC) && !receivedMessagesC.Has(msg) { + } else if strings.HasPrefix(in.GetTopic(), pubsubC) && !receivedMessagesC.Has(msg) { receivedMessagesC.Insert(msg) - } else if strings.HasPrefix(in.Topic, pubsubRaw) && !receivedMessagesRaw.Has(msg) { + } else if strings.HasPrefix(in.GetTopic(), pubsubRaw) && !receivedMessagesRaw.Has(msg) { receivedMessagesRaw.Insert(msg) - } else if strings.HasSuffix(in.Topic, pubsubBulkTopic) && !receivedMessagesBulkTopic.Has(msg) { + } else if strings.HasSuffix(in.GetTopic(), pubsubBulkTopic) && !receivedMessagesBulkTopic.Has(msg) { receivedMessagesBulkTopic.Insert(msg) - } else if strings.HasSuffix(in.Topic, pubsubRawBulkTopic) && !receivedMessagesRawBulkTopic.Has(msg) { + } else if strings.HasSuffix(in.GetTopic(), pubsubRawBulkTopic) && !receivedMessagesRawBulkTopic.Has(msg) { receivedMessagesRawBulkTopic.Insert(msg) - } else if strings.HasSuffix(in.Topic, pubsubCEBulkTopic) && !receivedMessagesCEBulkTopic.Has(msg) { + } else if strings.HasSuffix(in.GetTopic(), pubsubCEBulkTopic) && !receivedMessagesCEBulkTopic.Has(msg) { receivedMessagesCEBulkTopic.Insert(msg) - } else if strings.HasSuffix(in.Topic, pubsubDefBulkTopic) && !receivedMessagesDefBulkTopic.Has(msg) { + } else if strings.HasSuffix(in.GetTopic(), pubsubDefBulkTopic) && !receivedMessagesDefBulkTopic.Has(msg) { receivedMessagesDefBulkTopic.Insert(msg) - } else if strings.HasPrefix(in.Topic, pubsubRawSubTopic) && !receivedMessagesSubRaw.Has(msg) { + } else if strings.HasPrefix(in.GetTopic(), pubsubRawSubTopic) && !receivedMessagesSubRaw.Has(msg) { receivedMessagesSubRaw.Insert(msg) - } else if strings.HasPrefix(in.Topic, pubsubCESubTopic) && !receivedMessagesSubCE.Has(msg) { + } else if strings.HasPrefix(in.GetTopic(), pubsubCESubTopic) && !receivedMessagesSubCE.Has(msg) { receivedMessagesSubCE.Insert(msg) } else { - log.Printf("(%s) Received duplicate message: %s - %s", reqID, in.Topic, msg) + log.Printf("(%s) Received duplicate message: %s - %s", reqID, in.GetTopic(), msg) } if respondWithEmptyJSON { @@ -439,59 
+439,59 @@ func (s *server) OnTopicEvent(ctx context.Context, in *runtimev1pb.TopicEventReq func (s *server) OnBulkTopicEventAlpha1(ctx context.Context, in *runtimev1pb.TopicEventBulkRequest) (*runtimev1pb.TopicEventBulkResponse, error) { reqID := uuid.New().String() - log.Printf("(%s) Entered in OnBulkTopicEventAlpha1 in Bulk Subscribe - Topic: %s", reqID, in.Topic) + log.Printf("(%s) Entered in OnBulkTopicEventAlpha1 in Bulk Subscribe - Topic: %s", reqID, in.GetTopic()) lock.Lock() defer lock.Unlock() - bulkResponses := make([]*runtimev1pb.TopicEventBulkResponseEntry, len(in.Entries)) + bulkResponses := make([]*runtimev1pb.TopicEventBulkResponseEntry, len(in.GetEntries())) - for i, entry := range in.Entries { - if entry.Event == nil { - log.Printf("(%s) Responding with DROP in bulk subscribe for entryId: %s. entry.Event is nil", reqID, entry.EntryId) + for i, entry := range in.GetEntries() { + if entry.GetEvent() == nil { + log.Printf("(%s) Responding with DROP in bulk subscribe for entryId: %s. entry.Event is nil", reqID, entry.GetEntryId()) // Return success with DROP status to drop message bulkResponses[i] = &runtimev1pb.TopicEventBulkResponseEntry{ - EntryId: entry.EntryId, + EntryId: entry.GetEntryId(), Status: runtimev1pb.TopicEventResponse_DROP, //nolint:nosnakecase } } var msg string - if strings.HasPrefix(in.Topic, pubsubCEBulkSubTopic) { - log.Printf("(%s) Message arrived in Bulk Subscribe - Topic: %s, Message: %s", reqID, in.Topic, string(entry.GetCloudEvent().Data)) - err := json.Unmarshal(entry.GetCloudEvent().Data, &msg) + if strings.HasPrefix(in.GetTopic(), pubsubCEBulkSubTopic) { + log.Printf("(%s) Message arrived in Bulk Subscribe - Topic: %s, Message: %s", reqID, in.GetTopic(), string(entry.GetCloudEvent().GetData())) + err := json.Unmarshal(entry.GetCloudEvent().GetData(), &msg) if err != nil { - log.Printf("(%s) Error extracing ce event in bulk subscribe for entryId: %s: %v", reqID, entry.EntryId, err) + log.Printf("(%s) Error extracing ce event in bulk subscribe for entryId: %s: %v", reqID, entry.GetEntryId(), err) bulkResponses[i] = &runtimev1pb.TopicEventBulkResponseEntry{ - EntryId: entry.EntryId, + EntryId: entry.GetEntryId(), Status: runtimev1pb.TopicEventResponse_DROP, //nolint:nosnakecase } continue } - log.Printf("(%s) Value of ce event in bulk subscribe for entryId: %s: %s", reqID, entry.EntryId, msg) + log.Printf("(%s) Value of ce event in bulk subscribe for entryId: %s: %s", reqID, entry.GetEntryId(), msg) } else { - log.Printf("(%s) Message arrived in Bulk Subscribe - Topic: %s, Message: %s", reqID, in.Topic, string(entry.GetBytes())) + log.Printf("(%s) Message arrived in Bulk Subscribe - Topic: %s, Message: %s", reqID, in.GetTopic(), string(entry.GetBytes())) err := json.Unmarshal(entry.GetBytes(), &msg) if err != nil { - log.Printf("(%s) Error extracing raw event in bulk subscribe for entryId: %s: %v", reqID, entry.EntryId, err) + log.Printf("(%s) Error extracing raw event in bulk subscribe for entryId: %s: %v", reqID, entry.GetEntryId(), err) // Return success with DROP status to drop message bulkResponses[i] = &runtimev1pb.TopicEventBulkResponseEntry{ - EntryId: entry.EntryId, + EntryId: entry.GetEntryId(), Status: runtimev1pb.TopicEventResponse_DROP, //nolint:nosnakecase } continue } - log.Printf("(%s) Value of raw event in bulk subscribe for entryId: %s: %s", reqID, entry.EntryId, msg) + log.Printf("(%s) Value of raw event in bulk subscribe for entryId: %s: %s", reqID, entry.GetEntryId(), msg) } bulkResponses[i] = 
&runtimev1pb.TopicEventBulkResponseEntry{ - EntryId: entry.EntryId, + EntryId: entry.GetEntryId(), Status: runtimev1pb.TopicEventResponse_SUCCESS, //nolint:nosnakecase } - if strings.HasPrefix(in.Topic, pubsubRawBulkSubTopic) && !receivedMessagesRawBulkSub.Has(msg) { + if strings.HasPrefix(in.GetTopic(), pubsubRawBulkSubTopic) && !receivedMessagesRawBulkSub.Has(msg) { receivedMessagesRawBulkSub.Insert(msg) - } else if strings.HasPrefix(in.Topic, pubsubCEBulkSubTopic) && !receivedMessagesCEBulkSub.Has(msg) { + } else if strings.HasPrefix(in.GetTopic(), pubsubCEBulkSubTopic) && !receivedMessagesCEBulkSub.Has(msg) { receivedMessagesCEBulkSub.Insert(msg) } else { - log.Printf("(%s) Received duplicate message in bulk subscribe: %s - %s", reqID, in.Topic, msg) + log.Printf("(%s) Received duplicate message in bulk subscribe: %s - %s", reqID, in.GetTopic(), msg) } } log.Printf("(%s) Responding with SUCCESS for bulk subscribe", reqID) @@ -509,6 +509,6 @@ func (s *server) ListInputBindings(ctx context.Context, in *emptypb.Empty) (*run // This method gets invoked every time a new event is fired from a registered binding. The message carries the binding name, a payload and optional metadata. func (s *server) OnBindingEvent(ctx context.Context, in *runtimev1pb.BindingEventRequest) (*runtimev1pb.BindingEventResponse, error) { - log.Printf("Invoked from binding: %s", in.Name) + log.Printf("Invoked from binding: %s", in.GetName()) return &runtimev1pb.BindingEventResponse{}, nil } diff --git a/tests/apps/service_invocation/app.go b/tests/apps/service_invocation/app.go index 4cd2365a320..11c790039c3 100644 --- a/tests/apps/service_invocation/app.go +++ b/tests/apps/service_invocation/app.go @@ -723,7 +723,7 @@ func testV1RequestGRPCToGRPC(w http.ResponseWriter, r *http.Request) { return } - reqHeadersString := resp.GetData().Value + reqHeadersString := resp.GetData().GetValue() respHeaders := map[string][]string{} for k, vals := range header { @@ -813,7 +813,7 @@ func testV1RequestGRPCToHTTP(w http.ResponseWriter, r *http.Request) { return } - reqHeadersString := resp.GetData().Value + reqHeadersString := resp.GetData().GetValue() respHeaders := map[string][]string{} for k, vals := range header { @@ -876,7 +876,7 @@ func grpcToGrpcTest(w http.ResponseWriter, r *http.Request) { return } - body := resp.Data.GetValue() + body := resp.GetData().GetValue() log.Printf("resp was %s\n", string(body)) var responseMessage appResponse @@ -1175,7 +1175,7 @@ func grpcToHTTPTest(w http.ResponseWriter, r *http.Request) { return } - body := resp.Data.GetValue() + body := resp.GetData().GetValue() fmt.Printf("resp was %s\n", string(body)) // var responseMessage string @@ -1318,8 +1318,8 @@ func badServiceCallTestGrpc(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) } - if resp != nil && resp.Data != nil { - rawBody := resp.Data.GetValue() + if resp != nil && resp.GetData() != nil { + rawBody := resp.GetData().GetValue() testResponse.RawBody = rawBody json.NewDecoder(strings.NewReader(string(rawBody))).Decode(&testResponse.Results) } diff --git a/tests/apps/service_invocation_grpc/app.go b/tests/apps/service_invocation_grpc/app.go index 5e73011570f..7bffa358127 100644 --- a/tests/apps/service_invocation_grpc/app.go +++ b/tests/apps/service_invocation_grpc/app.go @@ -127,16 +127,16 @@ func (s *server) retrieveRequestObject(ctx context.Context) ([]byte, error) { // OnInvoke This method gets invoked when a remote service has called the app through Dapr // The payload carries a Method to 
identify the method, a set of metadata properties and an optional payload func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { - fmt.Printf("Got invoked method %s and data: %s\n", in.Method, string(in.GetData().Value)) + fmt.Printf("Got invoked method %s and data: %s\n", in.GetMethod(), string(in.GetData().GetValue())) var err error var response []byte - switch in.Method { + switch in.GetMethod() { case "httpToGrpcTest": // not a typo, the handling is the same as the case below fallthrough case "grpcToGrpcTest", "grpcToGrpcWithoutVerbTest": - response, err = s.grpcTestHandler(in.GetData().Value) + response, err = s.grpcTestHandler(in.GetData().GetValue()) case "retrieve_request_object": response, err = s.retrieveRequestObject(ctx) } diff --git a/tests/apps/stateapp/app.go b/tests/apps/stateapp/app.go index 328a9bdf004..684139be718 100644 --- a/tests/apps/stateapp/app.go +++ b/tests/apps/stateapp/app.go @@ -649,12 +649,12 @@ func grpcHandler(w http.ResponseWriter, r *http.Request) { if err != nil { statusCode, res.Message = setErrorMessage("QueryState", err.Error()) } - if resp != nil && len(resp.Results) > 0 { - res.States = make([]daprState, 0, len(resp.Results)) - for _, r := range resp.Results { + if resp != nil && len(resp.GetResults()) > 0 { + res.States = make([]daprState, 0, len(resp.GetResults())) + for _, r := range resp.GetResults() { res.States = append(res.States, daprState{ - Key: r.Key, - Value: &appState{Data: r.Data}, + Key: r.GetKey(), + Value: &appState{Data: r.GetData()}, }) } } @@ -683,20 +683,20 @@ func daprState2Keys(states []daprState) []string { } func toDaprStates(response *runtimev1pb.GetBulkStateResponse) ([]daprState, error) { - result := make([]daprState, len(response.Items)) - for i, state := range response.Items { - if state.Error != "" { - return nil, fmt.Errorf("%s while getting bulk state", state.Error) + result := make([]daprState, len(response.GetItems())) + for i, state := range response.GetItems() { + if state.GetError() != "" { + return nil, fmt.Errorf("%s while getting bulk state", state.GetError()) } - daprStateItem, err := parseState(state.Key, state.Data) + daprStateItem, err := parseState(state.GetKey(), state.GetData()) if err != nil { return nil, err } result[i] = daprState{ - Key: state.Key, + Key: state.GetKey(), Value: daprStateItem, - Etag: state.Etag, - Metadata: state.Metadata, + Etag: state.GetEtag(), + Metadata: state.GetMetadata(), } } @@ -750,8 +750,8 @@ func getAllGRPC(states []daprState, statestore string, meta map[string]string) ( if err != nil { return nil, err } - log.Printf("found state for key %s, value is %s\n", state.Key, res.Data) - val, err := parseState(state.Key, res.Data) + log.Printf("found state for key %s, value is %s\n", state.Key, res.GetData()) + val, err := parseState(state.Key, res.GetData()) if err != nil { return nil, err } @@ -1019,24 +1019,24 @@ func etagTestGRPC(statestore string) error { } if opts.expectNotFound { - if len(res.Data) != 0 { - return "", fmt.Errorf("invalid value for state %d: %q (expected empty)", stateId, string(res.Data)) + if len(res.GetData()) != 0 { + return "", fmt.Errorf("invalid value for state %d: %q (expected empty)", stateId, string(res.GetData())) } return "", nil } - if len(res.Data) == 0 || string(res.Data) != opts.expectValue { - return "", fmt.Errorf("invalid value for state %d: %q (expected: %q)", stateId, string(res.Data), opts.expectValue) + if len(res.GetData()) == 0 || string(res.GetData()) != opts.expectValue 
{ + return "", fmt.Errorf("invalid value for state %d: %q (expected: %q)", stateId, string(res.GetData()), opts.expectValue) } - if res.Etag == "" { + if res.GetEtag() == "" { return "", fmt.Errorf("etag is empty for state %d", stateId) } - if opts.expectEtagEqual != "" && res.Etag != opts.expectEtagEqual { - return "", fmt.Errorf("etag is invalid for state %d: %q (expected: %q)", stateId, res.Etag, opts.expectEtagEqual) + if opts.expectEtagEqual != "" && res.GetEtag() != opts.expectEtagEqual { + return "", fmt.Errorf("etag is invalid for state %d: %q (expected: %q)", stateId, res.GetEtag(), opts.expectEtagEqual) } - if opts.expectEtagNotEqual != "" && res.Etag == opts.expectEtagNotEqual { - return "", fmt.Errorf("etag is invalid for state %d: %q (expected different value)", stateId, res.Etag) + if opts.expectEtagNotEqual != "" && res.GetEtag() == opts.expectEtagNotEqual { + return "", fmt.Errorf("etag is invalid for state %d: %q (expected different value)", stateId, res.GetEtag()) } - return res.Etag, nil + return res.GetEtag(), nil } // First, write three values diff --git a/tests/e2e/actor_features/actor_features_test.go b/tests/e2e/actor_features/actor_features_test.go index 077f68b3786..3c662ca05a5 100644 --- a/tests/e2e/actor_features/actor_features_test.go +++ b/tests/e2e/actor_features/actor_features_test.go @@ -864,7 +864,7 @@ func TestActorFeatures(t *testing.T) { var currentMetadata metadata err = json.Unmarshal(res, ¤tMetadata) - assert.NoError(t, err, "error unmarshalling JSON") + require.NoError(t, err, "error unmarshalling JSON") assert.NotNil(t, currentMetadata, "metadata object is nil") assert.Equal(t, appName, currentMetadata.ID) diff --git a/tests/e2e/actor_invocation/actor_invocation_test.go b/tests/e2e/actor_invocation/actor_invocation_test.go index 50b2bcebf2d..fefa4e61e2f 100644 --- a/tests/e2e/actor_invocation/actor_invocation_test.go +++ b/tests/e2e/actor_invocation/actor_invocation_test.go @@ -120,7 +120,7 @@ func TestActorInvocation(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { _, status, err := utils.HTTPPostWithStatus(fmt.Sprintf(callActorURL, firstActorURL), body) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 200, status) }, 15*time.Second, 200*time.Millisecond) @@ -132,7 +132,7 @@ func TestActorInvocation(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { _, status, err := utils.HTTPPostWithStatus(fmt.Sprintf(callActorURL, secondActorURL), body) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 200, status) }, 15*time.Second, 200*time.Millisecond) }) diff --git a/tests/e2e/actor_reentrancy/actor_reentrancy_test.go b/tests/e2e/actor_reentrancy/actor_reentrancy_test.go index 7fa98c7fda0..9d7bc403460 100644 --- a/tests/e2e/actor_reentrancy/actor_reentrancy_test.go +++ b/tests/e2e/actor_reentrancy/actor_reentrancy_test.go @@ -137,7 +137,7 @@ func TestActorReentrancy(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { _, status, err := utils.HTTPPostWithStatus(fmt.Sprintf(actorInvokeURLFormat, reentrantURL, "hi", "method", "helloMethod"), body) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 200, status) }, 15*time.Second, 200*time.Millisecond) }) diff --git a/tests/e2e/actor_state/actor_state_test.go b/tests/e2e/actor_state/actor_state_test.go index 4618e13e1d0..d89cf898c3f 100644 --- a/tests/e2e/actor_state/actor_state_test.go +++ b/tests/e2e/actor_state/actor_state_test.go @@ -90,12 +90,12 @@ func TestActorState(t *testing.T) { actuid := 
uuid.String() resp, code, err := utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID", initActorURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) assert.Empty(t, string(resp)) resp, code, err = utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID/doesnotexist", httpURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusNoContent, code) assert.Empty(t, string(resp)) }) @@ -106,42 +106,42 @@ func TestActorState(t *testing.T) { actuid := uuid.String() _, code, err := utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID", initActorURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) myData := []byte(`[{"operation":"upsert","request":{"key":"myKey","value":"myData"}}]`) resp, code, err := utils.HTTPPostWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID", httpURL, actuid), myData) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusNoContent, code) assert.Empty(t, string(resp)) resp, code, err = utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID/myKey", httpURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) assert.Equal(t, `"myData"`, string(resp)) _, code, err = utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-notMyActorID/myKey", httpURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusBadRequest, code) newData := []byte(`[{"operation":"upsert","request":{"key":"myKey","value":"newData"}}]`) resp, code, err = utils.HTTPPostWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID", httpURL, actuid), newData) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusNoContent, code) resp, code, err = utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID/myKey", httpURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) assert.Equal(t, `"newData"`, string(resp)) deleteData := []byte(`[{"operation":"delete","request":{"key":"myKey"}}]`) resp, code, err = utils.HTTPPostWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID", httpURL, actuid), deleteData) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusNoContent, code) assert.Empty(t, string(resp)) resp, code, err = utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID/myKey", httpURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusNoContent, code) assert.Empty(t, string(resp)) }) @@ -152,19 +152,19 @@ func TestActorState(t *testing.T) { actuid := uuid.String() _, code, err := utils.HTTPGetWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID", initActorURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) now := time.Now() myData := []byte(`[{"operation":"upsert","request":{"key":"myTTLKey","value":"myTTLData","metadata":{"ttlInSeconds":"3"}}}]`) resp, code, err := utils.HTTPPostWithStatus(fmt.Sprintf("%s/httpMyActorType/%s-myActorID", httpURL, actuid), myData) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusNoContent, code) assert.Empty(t, resp) // Ensure the data isn't deleted yet. 
resp, code, header, err := utils.HTTPGetWithStatusWithMetadata(fmt.Sprintf("%s/httpMyActorType/%s-myActorID/myTTLKey", httpURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) assert.Equal(t, `"myTTLData"`, string(resp)) ttlExpireTimeStr := header.Get("metadata.ttlexpiretime") @@ -189,7 +189,7 @@ func TestActorState(t *testing.T) { actuid := uuid.String() _, code, err := utils.HTTPGetWithStatus(fmt.Sprintf("%s/grpcMyActorType/%s-myActorID", initActorURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) b, err := json.Marshal(&runtimev1.GetActorStateRequest{ @@ -198,7 +198,7 @@ func TestActorState(t *testing.T) { require.NoError(t, err) resp, code, err := utils.HTTPGetWithStatusWithData(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) assert.Equal(t, "{}", string(resp)) }) @@ -209,7 +209,7 @@ func TestActorState(t *testing.T) { actuid := uuid.String() _, code, err := utils.HTTPGetWithStatus(fmt.Sprintf("%s/grpcMyActorType/%s-myActorID", initActorURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) b, err := json.Marshal(&runtimev1.ExecuteActorStateTransactionRequest{ @@ -223,7 +223,7 @@ func TestActorState(t *testing.T) { }) require.NoError(t, err) _, code, err = utils.HTTPPostWithStatus(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) b, err = json.Marshal(&runtimev1.GetActorStateRequest{ @@ -232,10 +232,10 @@ func TestActorState(t *testing.T) { }) require.NoError(t, err) resp, code, err := utils.HTTPGetWithStatusWithData(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) var gresp runtimev1.GetActorStateResponse - assert.NoError(t, json.Unmarshal(resp, &gresp)) + require.NoError(t, json.Unmarshal(resp, &gresp)) assert.Equal(t, []byte("myData"), gresp.Data) b, err = json.Marshal(&runtimev1.GetActorStateRequest{ @@ -243,7 +243,7 @@ func TestActorState(t *testing.T) { }) require.NoError(t, err) _, code, err = utils.HTTPGetWithStatusWithData(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusInternalServerError, code) b, err = json.Marshal(&runtimev1.ExecuteActorStateTransactionRequest{ @@ -257,7 +257,7 @@ func TestActorState(t *testing.T) { }) require.NoError(t, err) resp, code, err = utils.HTTPPostWithStatus(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) assert.Empty(t, string(resp)) @@ -266,9 +266,9 @@ func TestActorState(t *testing.T) { }) require.NoError(t, err) resp, code, err = utils.HTTPGetWithStatusWithData(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) - assert.NoError(t, json.Unmarshal(resp, &gresp)) + require.NoError(t, json.Unmarshal(resp, &gresp)) assert.Equal(t, []byte("newData"), gresp.Data) b, err = json.Marshal(&runtimev1.ExecuteActorStateTransactionRequest{ @@ -279,7 +279,7 @@ func TestActorState(t *testing.T) { }) require.NoError(t, err) _, code, err = utils.HTTPPostWithStatus(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) b, err = json.Marshal(&runtimev1.GetActorStateRequest{ @@ -287,7 +287,7 @@ func TestActorState(t *testing.T) { }) require.NoError(t, err) resp, code, err = utils.HTTPGetWithStatusWithData(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 
http.StatusOK, code) assert.Equal(t, "{}", string(resp)) }) @@ -298,7 +298,7 @@ func TestActorState(t *testing.T) { actuid := uuid.String() _, code, err := utils.HTTPGetWithStatus(fmt.Sprintf("%s/grpcMyActorType/%s-myActorIDTTL", initActorURL, actuid)) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) b, err := json.Marshal(&runtimev1.ExecuteActorStateTransactionRequest{ @@ -315,17 +315,17 @@ func TestActorState(t *testing.T) { now := time.Now() _, code, err = utils.HTTPPostWithStatus(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) b, err = json.Marshal(&runtimev1.GetActorStateRequest{ ActorType: "grpcMyActorType", ActorId: fmt.Sprintf("%s-myActorIDTTL", actuid), Key: "myTTLKey", }) resp, code, err := utils.HTTPGetWithStatusWithData(grpcURL, b) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, http.StatusOK, code) var gresp runtimev1.GetActorStateResponse - assert.NoError(t, json.Unmarshal(resp, &gresp)) + require.NoError(t, json.Unmarshal(resp, &gresp)) assert.Equal(t, []byte("myData"), gresp.Data) ttlExpireTimeStr := gresp.Metadata["ttlExpireTime"] diff --git a/tests/e2e/workflows/workflow_test.go b/tests/e2e/workflows/workflow_test.go index a304bb65849..a49ab4f2f5f 100644 --- a/tests/e2e/workflows/workflow_test.go +++ b/tests/e2e/workflows/workflow_test.go @@ -87,7 +87,7 @@ func startTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Running", string(resp), "expected workflow to be Running, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) } @@ -103,7 +103,7 @@ func pauseResumeTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Running", string(resp), "expected workflow to be Running, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) @@ -113,7 +113,7 @@ func pauseResumeTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Suspended", string(resp), "expected workflow to be Suspended, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) @@ -123,7 +123,7 @@ func pauseResumeTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Running", string(resp), "expected workflow to be Running, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) } @@ -139,7 +139,7 @@ func raiseEventTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on 
workflow") assert.Equalf(t, "Running", string(resp), "expected workflow to be Running, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) @@ -165,7 +165,7 @@ func raiseEventTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Completed", string(resp), "expected workflow to be Completed, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) } @@ -182,7 +182,7 @@ func purgeTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Running", string(resp), "expected workflow to be Running, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) @@ -192,7 +192,7 @@ func purgeTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Terminated", string(resp), "expected workflow to be Terminated, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) @@ -208,7 +208,7 @@ func purgeTest(url string, instanceID string) func(t *testing.T) { require.EventuallyWithT(t, func(t *assert.CollectT) { resp, err = utils.HTTPGet(getString) - assert.NoError(t, err, "failure getting info on workflow") + require.NoError(t, err, "failure getting info on workflow") assert.Equalf(t, "Running", string(resp), "expected workflow to be Running, actual workflow state is: %s", string(resp)) }, 5*time.Second, 100*time.Millisecond) } diff --git a/tests/integration/framework/binary/binary.go b/tests/integration/framework/binary/binary.go index d6729e24398..86c8a01be43 100644 --- a/tests/integration/framework/binary/binary.go +++ b/tests/integration/framework/binary/binary.go @@ -23,7 +23,6 @@ import ( "sync" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/dapr/dapr/tests/integration/framework/iowriter" @@ -74,8 +73,8 @@ func Build(t *testing.T, name string) { cmd.Env = append(os.Environ(), "CGO_ENABLED=0") require.NoError(t, cmd.Run()) - assert.NoError(t, ioout.Close()) - assert.NoError(t, ioerr.Close()) + require.NoError(t, ioout.Close()) + require.NoError(t, ioerr.Close()) require.NoError(t, os.Setenv(EnvKey(name), binPath)) } else { diff --git a/tests/integration/framework/iowriter/iowriter_test.go b/tests/integration/framework/iowriter/iowriter_test.go index 4d8dd89dd4d..65006a0ee7c 100644 --- a/tests/integration/framework/iowriter/iowriter_test.go +++ b/tests/integration/framework/iowriter/iowriter_test.go @@ -72,19 +72,19 @@ func TestWrite(t *testing.T) { assert.Equal(t, 5, writer.buf.Len()) - assert.Len(t, logger.msgs, 0) + assert.Empty(t, logger.msgs) - assert.NoError(t, writer.Close()) + require.NoError(t, writer.Close()) _ = assert.Len(t, logger.msgs, 1) && assert.Equal(t, "TestLogger/proc: test", logger.msgs[0]) }) t.Run("should not return error on write when closed", func(t *testing.T) { writer := New(&mockLogger{t: t}, "proc").(*stdwriter) - 
assert.NoError(t, writer.Close()) + require.NoError(t, writer.Close()) _, err := writer.Write([]byte("test\n")) - assert.NoError(t, err, io.ErrClosedPipe) + require.NotErrorIs(t, err, io.ErrClosedPipe) assert.Equal(t, "test\n", writer.buf.String()) }) } @@ -97,7 +97,7 @@ func TestClose(t *testing.T) { writer.Close() assert.Equal(t, 0, writer.buf.Len()) - _ = assert.Equal(t, 1, len(logger.msgs)) && + _ = assert.Len(t, logger.msgs, 1) && assert.Equal(t, "TestLogger/proc: test", logger.msgs[0]) }) } @@ -110,7 +110,7 @@ func TestNotFailed(t *testing.T) { writer.Close() assert.Equal(t, 0, writer.buf.Len()) - assert.Equal(t, 0, len(logger.msgs)) + assert.Empty(t, logger.msgs) }) t.Run("if test has not failed but `DAPR_INTEGRATION_LOGS=true`, print output", func(t *testing.T) { @@ -121,7 +121,7 @@ func TestNotFailed(t *testing.T) { writer.Close() assert.Equal(t, 0, writer.buf.Len()) - _ = assert.Equal(t, 1, len(logger.msgs)) && + _ = assert.Len(t, logger.msgs, 1) && assert.Equal(t, "TestLogger/proc: test", logger.msgs[0]) }) @@ -133,7 +133,7 @@ func TestNotFailed(t *testing.T) { writer.Close() assert.Equal(t, 0, writer.buf.Len()) - _ = assert.Equal(t, 1, len(logger.msgs)) && + _ = assert.Len(t, logger.msgs, 1) && assert.Equal(t, "TestLogger/proc: test", logger.msgs[0]) }) } @@ -161,7 +161,7 @@ func TestConcurrency(t *testing.T) { wg.Wait() - assert.NoError(t, writer.Close()) + require.NoError(t, writer.Close()) assert.Equal(t, 0, writer.buf.Len()) assert.Len(t, logger.msgs, 2000) diff --git a/tests/integration/framework/process/daprd/daprd.go b/tests/integration/framework/process/daprd/daprd.go index e668ce38717..0f461d30c85 100644 --- a/tests/integration/framework/process/daprd/daprd.go +++ b/tests/integration/framework/process/daprd/daprd.go @@ -159,7 +159,7 @@ func (d *Daprd) Cleanup(t *testing.T) { func (d *Daprd) WaitUntilTCPReady(t *testing.T, ctx context.Context) { assert.Eventually(t, func() bool { dialer := net.Dialer{Timeout: time.Second} - net, err := dialer.DialContext(ctx, "tcp", "localhost:"+strconv.Itoa(d.HTTPPort())) + net, err := dialer.DialContext(ctx, "tcp", d.HTTPAddress()) if err != nil { return false } @@ -203,7 +203,7 @@ func (d *Daprd) WaitUntilAppHealth(t *testing.T, ctx context.Context) { case "grpc": assert.Eventually(t, func() bool { - conn, err := grpc.Dial("localhost:"+strconv.Itoa(d.appPort), + conn, err := grpc.Dial(d.AppAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) if conn != nil { @@ -229,18 +229,34 @@ func (d *Daprd) AppPort() int { return d.appPort } +func (d *Daprd) AppAddress() string { + return "localhost:" + strconv.Itoa(d.AppPort()) +} + func (d *Daprd) GRPCPort() int { return d.grpcPort } +func (d *Daprd) GRPCAddress() string { + return "localhost:" + strconv.Itoa(d.GRPCPort()) +} + func (d *Daprd) HTTPPort() int { return d.httpPort } +func (d *Daprd) HTTPAddress() string { + return "localhost:" + strconv.Itoa(d.HTTPPort()) +} + func (d *Daprd) InternalGRPCPort() int { return d.internalGRPCPort } +func (d *Daprd) InternalGRPCAddress() string { + return "localhost:" + strconv.Itoa(d.InternalGRPCPort()) +} + func (d *Daprd) PublicPort() int { return d.publicPort } diff --git a/tests/integration/framework/process/exec/exec.go b/tests/integration/framework/process/exec/exec.go index 28c0b8b08aa..30e90dc983a 100644 --- a/tests/integration/framework/process/exec/exec.go +++ b/tests/integration/framework/process/exec/exec.go @@ -62,9 +62,9 @@ func New(t *testing.T, binPath string, args []string, fopts ...Option) *exec { 
t.Helper() if runtime.GOOS == "windows" { // Windows returns 1 when we kill the process. - assert.ErrorContains(t, err, "exit status 1") + require.ErrorContains(t, err, "exit status 1") } else { - assert.NoError(t, err, "expected %q to run without error", binPath) + require.NoError(t, err, "expected %q to run without error", binPath) } }, exitCode: defaultExitCode, @@ -112,8 +112,8 @@ func (e *exec) Cleanup(t *testing.T) { e.lock.Lock() defer e.lock.Unlock() - assert.NoError(t, e.stderrpipe.Close()) - assert.NoError(t, e.stdoutpipe.Close()) + require.NoError(t, e.stderrpipe.Close()) + require.NoError(t, e.stdoutpipe.Close()) kill.Kill(t, e.cmd) e.checkExit(t) diff --git a/tests/integration/framework/process/exec/kill/kill_posix.go b/tests/integration/framework/process/exec/kill/kill_posix.go index 4fbebd9e38f..5697535398d 100644 --- a/tests/integration/framework/process/exec/kill/kill_posix.go +++ b/tests/integration/framework/process/exec/kill/kill_posix.go @@ -21,9 +21,9 @@ import ( "os/exec" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func interrupt(t *testing.T, cmd *exec.Cmd) { - assert.NoError(t, cmd.Process.Signal(os.Interrupt)) + require.NoError(t, cmd.Process.Signal(os.Interrupt)) } diff --git a/tests/integration/framework/process/grpc/grpc.go b/tests/integration/framework/process/grpc/grpc.go index 3bad6e5729d..5930bbfda7c 100644 --- a/tests/integration/framework/process/grpc/grpc.go +++ b/tests/integration/framework/process/grpc/grpc.go @@ -18,6 +18,7 @@ import ( "errors" "net" "net/http" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -68,6 +69,10 @@ func (g *GRPC) Port(t *testing.T) int { return ln.Addr().(*net.TCPAddr).Port } +func (g *GRPC) Address(t *testing.T) string { + return "localhost:" + strconv.Itoa(g.Port(t)) +} + func (g *GRPC) Run(t *testing.T, ctx context.Context) { ctx, cancel := context.WithCancel(ctx) diff --git a/tests/integration/framework/process/operator/operator.go b/tests/integration/framework/process/operator/operator.go index 57a4a18dac2..3fe72b44371 100644 --- a/tests/integration/framework/process/operator/operator.go +++ b/tests/integration/framework/process/operator/operator.go @@ -118,6 +118,10 @@ func (o *Operator) Port() int { return o.port } +func (o *Operator) Address() string { + return "localhost:" + strconv.Itoa(o.port) +} + func (o *Operator) MetricsPort() int { return o.metricsPort } diff --git a/tests/integration/framework/process/placement/placement.go b/tests/integration/framework/process/placement/placement.go index 7236734cd0c..58dc6b2294a 100644 --- a/tests/integration/framework/process/placement/placement.go +++ b/tests/integration/framework/process/placement/placement.go @@ -141,6 +141,10 @@ func (p *Placement) Port() int { return p.port } +func (p *Placement) Address() string { + return "localhost:" + strconv.Itoa(p.port) +} + func (p *Placement) HealthzPort() int { return p.healthzPort } diff --git a/tests/integration/framework/process/process.go b/tests/integration/framework/process/process.go index c86f5e19603..8ca09e9e808 100644 --- a/tests/integration/framework/process/process.go +++ b/tests/integration/framework/process/process.go @@ -20,6 +20,9 @@ import ( // Interface is an interface for running and cleaning up a process. type Interface interface { + // Run runs the process. Run(*testing.T, context.Context) + + // Cleanup cleans up the process. 
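The new address helpers added to the integration framework above (AppAddress, GRPCAddress, HTTPAddress and InternalGRPCAddress on Daprd, and Address() on the GRPC, Operator and Placement processes) all follow the same one-line pattern, so call sites can drop the repeated "localhost:"+strconv.Itoa(...) and fmt.Sprintf("localhost:%d", ...) formatting. A minimal sketch of the pattern, using a hypothetical process type rather than the real framework structs:

package framework

import "strconv"

// proc stands in for any of the framework process wrappers; each real type
// keeps its own port field(s) allocated at construction time.
type proc struct {
	port int
}

// Port returns the raw TCP port the process listens on.
func (p *proc) Port() int { return p.port }

// Address returns the port as a dialable "localhost:<port>" string,
// mirroring the Address()-style helpers introduced in this patch.
func (p *proc) Address() string {
	return "localhost:" + strconv.Itoa(p.Port())
}
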
Cleanup(*testing.T) } diff --git a/tests/integration/framework/process/sentry/sentry.go b/tests/integration/framework/process/sentry/sentry.go index 632e33837e7..9f8de49ca33 100644 --- a/tests/integration/framework/process/sentry/sentry.go +++ b/tests/integration/framework/process/sentry/sentry.go @@ -161,6 +161,10 @@ func (s *Sentry) Port() int { return s.port } +func (s *Sentry) Address() string { + return "localhost:" + strconv.Itoa(s.Port()) +} + func (s *Sentry) MetricsPort() int { return s.metricsPort } @@ -191,7 +195,7 @@ func (s *Sentry) DialGRPC(t *testing.T, ctx context.Context, sentryID string) *g ) require.NoError(t, err) t.Cleanup(func() { - assert.NoError(t, conn.Close()) + require.NoError(t, conn.Close()) }) return conn diff --git a/tests/integration/framework/process/statestore/component.go b/tests/integration/framework/process/statestore/component.go index cd32dd72556..76b241fcab8 100644 --- a/tests/integration/framework/process/statestore/component.go +++ b/tests/integration/framework/process/statestore/component.go @@ -37,15 +37,15 @@ func newComponent(t *testing.T, opts options) *component { } func (c *component) BulkDelete(ctx context.Context, req *compv1pb.BulkDeleteRequest) (*compv1pb.BulkDeleteResponse, error) { - dr := make([]state.DeleteRequest, len(req.Items)) - for i, item := range req.Items { + dr := make([]state.DeleteRequest, len(req.GetItems())) + for i, item := range req.GetItems() { dr[i] = state.DeleteRequest{ - Key: item.Key, + Key: item.GetKey(), ETag: &item.GetEtag().Value, - Metadata: item.Metadata, + Metadata: item.GetMetadata(), Options: state.DeleteStateOption{ - Concurrency: concurrencyOf(item.Options.Concurrency), - Consistency: consistencyOf(item.Options.Consistency), + Concurrency: concurrencyOf(item.GetOptions().GetConcurrency()), + Consistency: consistencyOf(item.GetOptions().GetConsistency()), }, } } @@ -59,13 +59,13 @@ func (c *component) BulkDelete(ctx context.Context, req *compv1pb.BulkDeleteRequ } func (c *component) BulkGet(ctx context.Context, req *compv1pb.BulkGetRequest) (*compv1pb.BulkGetResponse, error) { - gr := make([]state.GetRequest, len(req.Items)) - for i, item := range req.Items { + gr := make([]state.GetRequest, len(req.GetItems())) + for i, item := range req.GetItems() { gr[i] = state.GetRequest{ - Key: item.Key, - Metadata: item.Metadata, + Key: item.GetKey(), + Metadata: item.GetMetadata(), Options: state.GetStateOption{ - Consistency: consistencyOf(item.Consistency), + Consistency: consistencyOf(item.GetConsistency()), }, } } @@ -89,27 +89,27 @@ func (c *component) BulkGet(ctx context.Context, req *compv1pb.BulkGetRequest) ( } } - gresp.Items = append(gresp.Items, gitem) + gresp.Items = append(gresp.GetItems(), gitem) } return &gresp, nil } func (c *component) BulkSet(ctx context.Context, req *compv1pb.BulkSetRequest) (*compv1pb.BulkSetResponse, error) { - sr := make([]state.SetRequest, len(req.Items)) - for i, item := range req.Items { + sr := make([]state.SetRequest, len(req.GetItems())) + for i, item := range req.GetItems() { var etag *string - if item.Etag != nil { + if item.GetEtag() != nil { etag = &item.GetEtag().Value } sr[i] = state.SetRequest{ - Key: item.Key, - Value: item.Value, + Key: item.GetKey(), + Value: item.GetValue(), ETag: etag, - Metadata: item.Metadata, + Metadata: item.GetMetadata(), Options: state.SetStateOption{ - Concurrency: concurrencyOf(item.Options.Concurrency), - Consistency: consistencyOf(item.Options.Consistency), + Concurrency: concurrencyOf(item.GetOptions().GetConcurrency()), + 
Consistency: consistencyOf(item.GetOptions().GetConsistency()), }, } } @@ -124,16 +124,16 @@ func (c *component) BulkSet(ctx context.Context, req *compv1pb.BulkSetRequest) ( func (c *component) Delete(ctx context.Context, req *compv1pb.DeleteRequest) (*compv1pb.DeleteResponse, error) { var etag *string - if req.Etag != nil && len(req.GetEtag().Value) > 0 { + if req.GetEtag() != nil && len(req.GetEtag().GetValue()) > 0 { etag = &req.GetEtag().Value } err := c.impl.Delete(ctx, &state.DeleteRequest{ - Key: req.Key, + Key: req.GetKey(), ETag: etag, - Metadata: req.Metadata, + Metadata: req.GetMetadata(), Options: state.DeleteStateOption{ - Concurrency: concurrencyOf(req.Options.Concurrency), - Consistency: consistencyOf(req.Options.Consistency), + Concurrency: concurrencyOf(req.GetOptions().GetConcurrency()), + Consistency: consistencyOf(req.GetOptions().GetConsistency()), }, }) if err != nil { @@ -155,10 +155,10 @@ func (c *component) Features(context.Context, *compv1pb.FeaturesRequest) (*compv func (c *component) Get(ctx context.Context, req *compv1pb.GetRequest) (*compv1pb.GetResponse, error) { resp, err := c.impl.Get(ctx, &state.GetRequest{ - Key: req.Key, - Metadata: req.Metadata, + Key: req.GetKey(), + Metadata: req.GetMetadata(), Options: state.GetStateOption{ - Consistency: consistencyOf(req.Consistency), + Consistency: consistencyOf(req.GetConsistency()), }, }) if err != nil { @@ -183,7 +183,7 @@ func (c *component) Init(ctx context.Context, req *compv1pb.InitRequest) (*compv return new(compv1pb.InitResponse), c.impl.Init(ctx, state.Metadata{ Base: metadata.Base{ Name: "state.wrapped-in-memory", - Properties: req.GetMetadata().Properties, + Properties: req.GetMetadata().GetProperties(), }, }) } @@ -198,17 +198,17 @@ func (c *component) Ping(ctx context.Context, req *compv1pb.PingRequest) (*compv func (c *component) Set(ctx context.Context, req *compv1pb.SetRequest) (*compv1pb.SetResponse, error) { var etag *string - if req.Etag != nil && len(req.GetEtag().Value) > 0 { + if req.GetEtag() != nil && len(req.GetEtag().GetValue()) > 0 { etag = &req.GetEtag().Value } err := c.impl.Set(ctx, &state.SetRequest{ - Key: req.Key, - Value: req.Value, - Metadata: req.Metadata, + Key: req.GetKey(), + Value: req.GetValue(), + Metadata: req.GetMetadata(), ETag: etag, Options: state.SetStateOption{ - Concurrency: concurrencyOf(req.Options.Concurrency), - Consistency: consistencyOf(req.Options.Consistency), + Concurrency: concurrencyOf(req.GetOptions().GetConcurrency()), + Consistency: consistencyOf(req.GetOptions().GetConsistency()), }, }) if err != nil { @@ -219,33 +219,33 @@ func (c *component) Set(ctx context.Context, req *compv1pb.SetRequest) (*compv1p func (c *component) Transact(ctx context.Context, req *compv1pb.TransactionalStateRequest) (*compv1pb.TransactionalStateResponse, error) { var operations []state.TransactionalStateOperation - for _, op := range req.Operations { - switch v := op.Request.(type) { + for _, op := range req.GetOperations() { + switch v := op.GetRequest().(type) { case *compv1pb.TransactionalStateOperation_Delete: delReq := state.DeleteRequest{ - Key: v.Delete.Key, - Metadata: v.Delete.Metadata, + Key: v.Delete.GetKey(), + Metadata: v.Delete.GetMetadata(), Options: state.DeleteStateOption{ - Concurrency: concurrencyOf(v.Delete.Options.Concurrency), - Consistency: consistencyOf(v.Delete.Options.Consistency), + Concurrency: concurrencyOf(v.Delete.GetOptions().GetConcurrency()), + Consistency: consistencyOf(v.Delete.GetOptions().GetConsistency()), }, } - if v.Delete.Etag != 
nil { + if v.Delete.GetEtag() != nil { delReq.ETag = &v.Delete.GetEtag().Value } operations = append(operations, delReq) case *compv1pb.TransactionalStateOperation_Set: setReq := state.SetRequest{ - Key: v.Set.Key, - Value: v.Set.Value, - Metadata: v.Set.Metadata, + Key: v.Set.GetKey(), + Value: v.Set.GetValue(), + Metadata: v.Set.GetMetadata(), Options: state.SetStateOption{ - Concurrency: concurrencyOf(v.Set.Options.Concurrency), - Consistency: consistencyOf(v.Set.Options.Consistency), + Concurrency: concurrencyOf(v.Set.GetOptions().GetConcurrency()), + Consistency: consistencyOf(v.Set.GetOptions().GetConsistency()), }, } - if v.Set.Etag != nil && v.Set.Etag.Value != "" { + if v.Set.GetEtag() != nil && v.Set.GetEtag().GetValue() != "" { setReq.ETag = &v.Set.GetEtag().Value } default: @@ -255,7 +255,7 @@ func (c *component) Transact(ctx context.Context, req *compv1pb.TransactionalSta err := c.impl.(state.TransactionalStore).Multi(ctx, &state.TransactionalStateRequest{ Operations: operations, - Metadata: req.Metadata, + Metadata: req.GetMetadata(), }) if err != nil { return nil, fmt.Errorf("error performing transactional state operation: %s", err) diff --git a/tests/integration/suite/actors/grpc/ttl.go b/tests/integration/suite/actors/grpc/ttl.go index 5e9beb502da..730b97ec430 100644 --- a/tests/integration/suite/actors/grpc/ttl.go +++ b/tests/integration/suite/actors/grpc/ttl.go @@ -15,11 +15,9 @@ package grpc import ( "context" - "fmt" "net/http" "os" "path/filepath" - "strconv" "testing" "time" @@ -76,7 +74,7 @@ spec: l.daprd = daprd.New(t, daprd.WithInMemoryActorStateStore("mystore"), daprd.WithConfigs(configFile), - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(l.place.Port())), + daprd.WithPlacementAddresses(l.place.Address()), daprd.WithAppPort(srv.Port()), ) @@ -89,7 +87,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { l.place.WaitUntilRunning(t, ctx) l.daprd.WaitUntilRunning(t, ctx) - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", l.daprd.GRPCPort()), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, l.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, conn.Close()) }) client := rtv1.NewDaprClient(conn) @@ -100,6 +98,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { ActorId: "myactorid", Method: "foo", }) + //nolint:testifylint assert.NoError(c, err) }, time.Second*10, time.Millisecond*100, "actor not ready") @@ -128,8 +127,8 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { }) require.NoError(t, err) - assert.Equal(t, "myvalue", string(resp.Data)) - ttlExpireTimeStr, ok := resp.Metadata["ttlExpireTime"] + assert.Equal(t, "myvalue", string(resp.GetData())) + ttlExpireTimeStr, ok := resp.GetMetadata()["ttlExpireTime"] require.True(t, ok) var ttlExpireTime time.Time ttlExpireTime, err = time.Parse(time.RFC3339, ttlExpireTimeStr) @@ -162,7 +161,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { }) require.NoError(t, err) - assert.Equal(t, "myvalue", string(resp.Data)) + assert.Equal(t, "myvalue", string(resp.GetData())) }) t.Run("ensure the state key is deleted after the ttl", func(t *testing.T) { @@ -173,8 +172,8 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { Key: "mykey", }) require.NoError(c, err) - assert.Empty(c, resp.Data) - assert.Empty(c, resp.Metadata) + assert.Empty(c, resp.GetData()) + assert.Empty(c, resp.GetMetadata()) }, 
5*time.Second, 100*time.Millisecond) }) } diff --git a/tests/integration/suite/actors/healthz/deactivate-on-placement-fail.go b/tests/integration/suite/actors/healthz/deactivate-on-placement-fail.go index 71551412a25..c26a18dbeb8 100644 --- a/tests/integration/suite/actors/healthz/deactivate-on-placement-fail.go +++ b/tests/integration/suite/actors/healthz/deactivate-on-placement-fail.go @@ -25,6 +25,7 @@ import ( chi "github.com/go-chi/chi/v5" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/dapr/dapr/tests/integration/framework" "github.com/dapr/dapr/tests/integration/framework/process/daprd" @@ -105,12 +106,12 @@ func (h *deactivateOnPlacementFail) Run(t *testing.T, ctx context.Context) { assert.EventuallyWithT(t, func(t *assert.CollectT) { daprdURL := fmt.Sprintf("http://localhost:%d/v1.0/actors/myactortype/myactor%d/method/foo", h.daprd.HTTPPort(), i) req, err := http.NewRequestWithContext(ctx, http.MethodPost, daprdURL, nil) - assert.NoError(t, err) + require.NoError(t, err) resp, err := client.Do(req) - assert.NoError(t, err) + require.NoError(t, err) defer resp.Body.Close() body, err := io.ReadAll(resp.Body) - assert.NoError(t, err) + require.NoError(t, err) assert.Equalf(t, http.StatusOK, resp.StatusCode, "Response body: %v", string(body)) }, 10*time.Second, 100*time.Millisecond, "actor not ready") } diff --git a/tests/integration/suite/actors/healthz/healthz.go b/tests/integration/suite/actors/healthz/healthz.go index 0d7c2b1f2cf..554b5334baa 100644 --- a/tests/integration/suite/actors/healthz/healthz.go +++ b/tests/integration/suite/actors/healthz/healthz.go @@ -66,7 +66,7 @@ func (h *healthz) Setup(t *testing.T) []framework.Option { h.place = placement.New(t) h.daprd = daprd.New(t, daprd.WithInMemoryActorStateStore("mystore"), - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(h.place.Port())), + daprd.WithPlacementAddresses(h.place.Address()), daprd.WithAppProtocol("http"), daprd.WithAppPort(srv.Port()), ) @@ -94,5 +94,5 @@ func (h *healthz) Run(t *testing.T, ctx context.Context) { resp, err := client.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) } diff --git a/tests/integration/suite/actors/healthz/initerror.go b/tests/integration/suite/actors/healthz/initerror.go index 5a7f2522822..25b2f56a968 100644 --- a/tests/integration/suite/actors/healthz/initerror.go +++ b/tests/integration/suite/actors/healthz/initerror.go @@ -20,7 +20,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/dapr/dapr/tests/integration/framework" @@ -68,7 +67,7 @@ func (i *initerror) Setup(t *testing.T) []framework.Option { i.place = placement.New(t) i.daprd = daprd.New(t, daprd.WithInMemoryActorStateStore("mystore"), - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(i.place.Port())), + daprd.WithPlacementAddresses(i.place.Address()), daprd.WithAppPort(srv.Port()), // Daprd is super noisy in debug mode when connecting to placement. 
daprd.WithLogLevel("info"), @@ -98,9 +97,9 @@ func (i *initerror) Run(t *testing.T, ctx context.Context) { req, err := http.NewRequestWithContext(rctx, http.MethodPost, daprdURL, nil) require.NoError(t, err) resp, err := client.Do(req) - assert.ErrorIs(t, err, context.DeadlineExceeded) + require.ErrorIs(t, err, context.DeadlineExceeded) if resp != nil && resp.Body != nil { - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) } close(i.blockConfig) @@ -116,5 +115,5 @@ func (i *initerror) Run(t *testing.T, ctx context.Context) { resp, err = client.Do(req) require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) } diff --git a/tests/integration/suite/actors/http/ttl.go b/tests/integration/suite/actors/http/ttl.go index 8453efb0fcf..3b377b79fc2 100644 --- a/tests/integration/suite/actors/http/ttl.go +++ b/tests/integration/suite/actors/http/ttl.go @@ -73,7 +73,7 @@ spec: l.daprd = daprd.New(t, daprd.WithInMemoryActorStateStore("mystore"), daprd.WithConfigs(configFile), - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(l.place.Port())), + daprd.WithPlacementAddresses(l.place.Address()), daprd.WithAppPort(srv.Port()), ) @@ -96,7 +96,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { require.EventuallyWithT(t, func(c *assert.CollectT) { resp, rErr := client.Do(req) require.NoError(c, rErr) - assert.NoError(c, resp.Body.Close()) + require.NoError(c, resp.Body.Close()) assert.Equal(c, http.StatusOK, resp.StatusCode) }, time.Second*10, time.Millisecond*100, "actor not ready") @@ -107,7 +107,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { require.NoError(t, err) resp, err := client.Do(req) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) t.Run("ensure the state key returns a ttlExpireTime header", func(t *testing.T) { req, err = http.NewRequest(http.MethodGet, daprdURL+"/v1.0/actors/myactortype/myactorid/state/key1", nil) @@ -118,7 +118,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { var body []byte body, err = io.ReadAll(resp.Body) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) assert.Equal(t, http.StatusOK, resp.StatusCode) assert.Equal(t, `"value1"`, string(body)) ttlExpireTimeStr := resp.Header.Get("metadata.ttlExpireTime") @@ -135,7 +135,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { //nolint:bodyclose resp, err = client.Do(req) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) time.Sleep(time.Second * 2) @@ -147,7 +147,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { var body []byte body, err = io.ReadAll(resp.Body) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) assert.Equal(t, http.StatusOK, resp.StatusCode) assert.Equal(t, `"value1"`, string(body)) }) @@ -162,7 +162,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { var body []byte body, err = io.ReadAll(resp.Body) require.NoError(c, err) - assert.NoError(c, resp.Body.Close()) + require.NoError(c, resp.Body.Close()) assert.Empty(c, string(body)) assert.Equal(c, http.StatusNoContent, resp.StatusCode) }, 5*time.Second, 100*time.Millisecond) diff --git a/tests/integration/suite/actors/metadata/client.go b/tests/integration/suite/actors/metadata/client.go index 394c8d6fe73..e9a21c90e3e 100644 --- 
a/tests/integration/suite/actors/metadata/client.go +++ b/tests/integration/suite/actors/metadata/client.go @@ -16,7 +16,6 @@ package metadata import ( "context" "net/http" - "strconv" "testing" "time" @@ -57,7 +56,7 @@ func (m *client) Setup(t *testing.T) []framework.Option { srv := prochttp.New(t, prochttp.WithHandler(handler)) m.place = placement.New(t) m.daprd = daprd.New(t, - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(m.place.Port())), + daprd.WithPlacementAddresses(m.place.Address()), daprd.WithAppProtocol("http"), daprd.WithAppPort(srv.Port()), daprd.WithLogLevel("info"), // Daprd is super noisy in debug mode when connecting to placement. diff --git a/tests/integration/suite/actors/metadata/host.go b/tests/integration/suite/actors/metadata/host.go index 1d18bfbb0c9..74bf5a9b00e 100644 --- a/tests/integration/suite/actors/metadata/host.go +++ b/tests/integration/suite/actors/metadata/host.go @@ -16,7 +16,6 @@ package metadata import ( "context" "net/http" - "strconv" "testing" "time" @@ -61,7 +60,7 @@ func (m *host) Setup(t *testing.T) []framework.Option { m.place = placement.New(t) m.daprd = daprd.New(t, daprd.WithInMemoryActorStateStore("mystore"), - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(m.place.Port())), + daprd.WithPlacementAddresses(m.place.Address()), daprd.WithAppProtocol("http"), daprd.WithAppPort(srv.Port()), daprd.WithLogLevel("info"), // Daprd is super noisy in debug mode when connecting to placement. diff --git a/tests/integration/suite/actors/metadata/shared.go b/tests/integration/suite/actors/metadata/shared.go index 9b248fbd180..c74701c232f 100644 --- a/tests/integration/suite/actors/metadata/shared.go +++ b/tests/integration/suite/actors/metadata/shared.go @@ -20,7 +20,7 @@ import ( "net/http" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Subset of data returned by the metadata endpoint. 
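Most of the assertion churn in these tests is the switch from assert to require for error checks: require.NoError calls FailNow and stops the test (or helper) immediately, while assert.NoError only records the failure and keeps executing against a possibly nil value. The getMetadata change below is the same idea applied to a helper signature: taking require.TestingT instead of assert.TestingT lets the early-return guards around each assert disappear. A minimal sketch of that helper style, with hypothetical names, not part of the patch:

package example

import (
	"context"
	"encoding/json"
	"net/http"

	"github.com/stretchr/testify/require"
)

// fetchJSON is a hypothetical helper in the style of getMetadata: because it
// takes require.TestingT it can fail fast instead of wrapping every step in
// `if !assert.NoError(...) { return }`.
func fetchJSON(t require.TestingT, ctx context.Context, client *http.Client, url string) map[string]any {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	require.NoError(t, err)

	resp, err := client.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()

	var out map[string]any
	require.NoError(t, json.NewDecoder(resp.Body).Decode(&out))
	return out
}

The handful of call sites that keep assert inside require.EventuallyWithT collectors do so deliberately and carry //nolint:testifylint markers elsewhere in this patch.
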
@@ -36,23 +36,19 @@ type metadataRes struct { } `json:"actorRuntime"` } -func getMetadata(t assert.TestingT, ctx context.Context, client *http.Client, port int) (res metadataRes) { +func getMetadata(t require.TestingT, ctx context.Context, client *http.Client, port int) (res metadataRes) { ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://localhost:%d/v1.0/metadata", port), nil) - if !assert.NoError(t, err) { - return res - } + require.NoError(t, err) resp, err := client.Do(req) - if !assert.NoError(t, err) { - return res - } + require.NoError(t, err) defer resp.Body.Close() err = json.NewDecoder(resp.Body).Decode(&res) - assert.NoError(t, err) + require.NoError(t, err) return res } diff --git a/tests/integration/suite/actors/reminders/basic.go b/tests/integration/suite/actors/reminders/basic.go index a453d4bc65d..e9e18fc209e 100644 --- a/tests/integration/suite/actors/reminders/basic.go +++ b/tests/integration/suite/actors/reminders/basic.go @@ -15,7 +15,6 @@ package reminders import ( "context" - "fmt" "net/http" "strconv" "strings" @@ -66,7 +65,7 @@ func (b *basic) Setup(t *testing.T) []framework.Option { b.place = placement.New(t) b.daprd = daprd.New(t, daprd.WithInMemoryActorStateStore("mystore"), - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(b.place.Port())), + daprd.WithPlacementAddresses(b.place.Address()), daprd.WithAppPort(srv.Port()), ) @@ -89,7 +88,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { require.EventuallyWithT(t, func(c *assert.CollectT) { resp, rErr := client.Do(req) require.NoError(c, rErr) - assert.NoError(c, resp.Body.Close()) + require.NoError(c, resp.Body.Close()) assert.Equal(c, http.StatusOK, resp.StatusCode) }, time.Second*10, time.Millisecond*100, "actor not ready in time") @@ -99,14 +98,14 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { resp, err := client.Do(req) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) assert.Equal(t, http.StatusNoContent, resp.StatusCode) assert.Eventually(t, func() bool { return b.methodcalled.Load() == 1 }, time.Second*3, time.Millisecond*100) - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", b.daprd.GRPCPort()), + conn, err := grpc.DialContext(ctx, b.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), ) require.NoError(t, err) diff --git a/tests/integration/suite/actors/reminders/rebalancing.go b/tests/integration/suite/actors/reminders/rebalancing.go index 7cb1a25f978..f94b43e458d 100644 --- a/tests/integration/suite/actors/reminders/rebalancing.go +++ b/tests/integration/suite/actors/reminders/rebalancing.go @@ -97,7 +97,7 @@ spec: - name: actorStateStore value: 'true' `), - daprd.WithPlacementAddresses("localhost:"+strconv.Itoa(i.place.Port())), + daprd.WithPlacementAddresses(i.place.Address()), daprd.WithAppPort(i.srv[j].Port()), // Daprd is super noisy in debug mode when connecting to placement. 
daprd.WithLogLevel("info"), @@ -142,7 +142,7 @@ func (i *rebalancing) Run(t *testing.T, ctx context.Context) { require.NoError(c, err) resp, rErr := client.Do(req) require.NoError(c, rErr) - assert.NoError(c, resp.Body.Close()) + require.NoError(c, resp.Body.Close()) assert.Equal(c, http.StatusOK, resp.StatusCode) }, 10*time.Second, 100*time.Millisecond, "actors not ready") diff --git a/tests/integration/suite/daprd/httpserver/httpserver.go b/tests/integration/suite/daprd/httpserver/httpserver.go index 76478b3cdff..98c60bf3039 100644 --- a/tests/integration/suite/daprd/httpserver/httpserver.go +++ b/tests/integration/suite/daprd/httpserver/httpserver.go @@ -118,7 +118,7 @@ func (h *httpServer) Run(t *testing.T, ctx context.Context) { res, err := h1Client.Do(req) require.NoError(t, err) t.Cleanup(func() { - assert.NoError(t, res.Body.Close()) + require.NoError(t, res.Body.Close()) }) // This response should have arrived over HTTP/1 diff --git a/tests/integration/suite/daprd/metrics/metrics.go b/tests/integration/suite/daprd/metrics/metrics.go index 405d90378bd..c31a97f621e 100644 --- a/tests/integration/suite/daprd/metrics/metrics.go +++ b/tests/integration/suite/daprd/metrics/metrics.go @@ -74,7 +74,7 @@ func (m *metrics) Run(t *testing.T, ctx context.Context) { m.httpClient = util.HTTPClient(t) conn, err := grpc.DialContext(ctx, - fmt.Sprintf("localhost:%d", m.daprd.GRPCPort()), + m.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), ) @@ -200,7 +200,7 @@ func (m *metrics) getMetrics(t *testing.T, ctx context.Context) map[string]float continue } - for _, m := range mf.Metric { + for _, m := range mf.GetMetric() { key := mf.GetName() for _, l := range m.GetLabel() { key += "|" + l.GetName() + ":" + l.GetValue() diff --git a/tests/integration/suite/daprd/mtls/kubernetes/disable.go b/tests/integration/suite/daprd/mtls/kubernetes/disable.go index 87efbae62dd..837e87151d5 100644 --- a/tests/integration/suite/daprd/mtls/kubernetes/disable.go +++ b/tests/integration/suite/daprd/mtls/kubernetes/disable.go @@ -17,7 +17,6 @@ import ( "context" "os" "path/filepath" - "strconv" "testing" "time" @@ -63,19 +62,19 @@ func (e *disable) Setup(t *testing.T) []framework.Option { e.placement = procplacement.New(t, procplacement.WithEnableTLS(true), procplacement.WithTrustAnchorsFile(taFile), - procplacement.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), + procplacement.WithSentryAddress(e.sentry.Address()), ) - e.operator = newOperator(t, bundle.TrustAnchors, "localhost:"+strconv.Itoa(e.sentry.Port())) + e.operator = newOperator(t, bundle.TrustAnchors, e.sentry.Address()) e.daprd = procdaprd.New(t, procdaprd.WithAppID("my-app"), procdaprd.WithMode("kubernetes"), procdaprd.WithExecOptions(exec.WithEnvVars("DAPR_TRUST_ANCHORS", string(bundle.TrustAnchors))), - procdaprd.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), - procdaprd.WithControlPlaneAddress("localhost:"+strconv.Itoa(e.operator.Port(t))), + procdaprd.WithSentryAddress(e.sentry.Address()), + procdaprd.WithControlPlaneAddress(e.operator.Address(t)), procdaprd.WithDisableK8sSecretStore(true), - procdaprd.WithPlacementAddresses("localhost:"+strconv.Itoa(e.placement.Port())), + procdaprd.WithPlacementAddresses(e.placement.Address()), // Disable mTLS procdaprd.WithEnableMTLS(false), @@ -92,21 +91,21 @@ func (e *disable) Run(t *testing.T, ctx context.Context) { e.daprd.WaitUntilRunning(t, ctx) t.Run("trying plain text connection to Dapr API should succeed", func(t *testing.T) { - 
conn, err := grpc.DialContext(ctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), + conn, err := grpc.DialContext(ctx, e.daprd.InternalGRPCAddress(), grpc.WithReturnConnectionError(), grpc.WithTransportCredentials(insecure.NewCredentials()), ) require.NoError(t, err) conn.Connect() assert.Equal(t, connectivity.Ready, conn.GetState()) - assert.NoError(t, conn.Close()) + require.NoError(t, conn.Close()) }) t.Run("trying mTLS connection to Dapr API should fail", func(t *testing.T) { sctx, cancel := context.WithCancel(ctx) secProv, err := security.New(sctx, security.Options{ - SentryAddress: "localhost:" + strconv.Itoa(e.sentry.Port()), + SentryAddress: e.sentry.Address(), ControlPlaneTrustDomain: "localhost", ControlPlaneNamespace: "default", TrustAnchors: e.trustAnchors, @@ -126,7 +125,7 @@ func (e *disable) Run(t *testing.T, ctx context.Context) { case <-time.After(5 * time.Second): t.Fatal("timed out waiting for security provider to stop") case err = <-secProvErr: - assert.NoError(t, err) + require.NoError(t, err) } }) @@ -138,8 +137,8 @@ func (e *disable) Run(t *testing.T, ctx context.Context) { gctx, gcancel := context.WithTimeout(ctx, time.Second) t.Cleanup(gcancel) - _, err = grpc.DialContext(gctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), sec.GRPCDialOptionMTLS(myAppID), + _, err = grpc.DialContext(gctx, e.daprd.InternalGRPCAddress(), sec.GRPCDialOptionMTLS(myAppID), grpc.WithReturnConnectionError()) - assert.ErrorContains(t, err, "tls: first record does not look like a TLS handshake") + require.ErrorContains(t, err, "tls: first record does not look like a TLS handshake") }) } diff --git a/tests/integration/suite/daprd/mtls/kubernetes/enable.go b/tests/integration/suite/daprd/mtls/kubernetes/enable.go index bbde2b5768f..125e933cd8e 100644 --- a/tests/integration/suite/daprd/mtls/kubernetes/enable.go +++ b/tests/integration/suite/daprd/mtls/kubernetes/enable.go @@ -17,7 +17,6 @@ import ( "context" "os" "path/filepath" - "strconv" "testing" "time" @@ -63,19 +62,19 @@ func (e *enable) Setup(t *testing.T) []framework.Option { e.placement = procplacement.New(t, procplacement.WithEnableTLS(true), procplacement.WithTrustAnchorsFile(taFile), - procplacement.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), + procplacement.WithSentryAddress(e.sentry.Address()), ) - e.operator = newOperator(t, bundle.TrustAnchors, "localhost:"+strconv.Itoa(e.sentry.Port())) + e.operator = newOperator(t, bundle.TrustAnchors, e.sentry.Address()) e.daprd = procdaprd.New(t, procdaprd.WithAppID("my-app"), procdaprd.WithMode("kubernetes"), procdaprd.WithExecOptions(exec.WithEnvVars("DAPR_TRUST_ANCHORS", string(bundle.TrustAnchors))), - procdaprd.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), - procdaprd.WithControlPlaneAddress("localhost:"+strconv.Itoa(e.operator.Port(t))), + procdaprd.WithSentryAddress(e.sentry.Address()), + procdaprd.WithControlPlaneAddress(e.operator.Address(t)), procdaprd.WithDisableK8sSecretStore(true), - procdaprd.WithPlacementAddresses("localhost:"+strconv.Itoa(e.placement.Port())), + procdaprd.WithPlacementAddresses(e.placement.Address()), // Enable mTLS procdaprd.WithEnableMTLS(true), @@ -94,7 +93,7 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { t.Run("trying plain text connection to Dapr API should fail", func(t *testing.T) { gctx, gcancel := context.WithTimeout(ctx, time.Second) t.Cleanup(gcancel) - _, err := grpc.DialContext(gctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), + _, err := grpc.DialContext(gctx, 
e.daprd.InternalGRPCAddress(), grpc.WithReturnConnectionError(), grpc.WithTransportCredentials(insecure.NewCredentials()), ) @@ -105,7 +104,7 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { sctx, cancel := context.WithCancel(ctx) secProv, err := security.New(sctx, security.Options{ - SentryAddress: "localhost:" + strconv.Itoa(e.sentry.Port()), + SentryAddress: e.sentry.Address(), ControlPlaneTrustDomain: "localhost", ControlPlaneNamespace: "default", TrustAnchors: e.trustAnchors, @@ -125,7 +124,7 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { case <-time.After(5 * time.Second): t.Fatal("timed out waiting for security provider to stop") case err = <-secProvErr: - assert.NoError(t, err) + require.NoError(t, err) } }) @@ -135,11 +134,11 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { myAppID, err := spiffeid.FromSegments(spiffeid.RequireTrustDomainFromString("public"), "ns", "default", "my-app") require.NoError(t, err) - conn, err := grpc.DialContext(ctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), sec.GRPCDialOptionMTLS(myAppID), + conn, err := grpc.DialContext(ctx, e.daprd.InternalGRPCAddress(), sec.GRPCDialOptionMTLS(myAppID), grpc.WithReturnConnectionError()) require.NoError(t, err) conn.Connect() assert.Equal(t, connectivity.Ready, conn.GetState()) - assert.NoError(t, conn.Close()) + require.NoError(t, conn.Close()) }) } diff --git a/tests/integration/suite/daprd/mtls/kubernetes/operator.go b/tests/integration/suite/daprd/mtls/kubernetes/operator.go index 16cee84c096..3999da4a9aa 100644 --- a/tests/integration/suite/daprd/mtls/kubernetes/operator.go +++ b/tests/integration/suite/daprd/mtls/kubernetes/operator.go @@ -18,7 +18,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" @@ -53,7 +52,7 @@ func newOperator(t *testing.T, trustAnchors []byte, sentryAddress string) *procg case <-time.After(5 * time.Second): t.Fatal("timed out waiting for security provider to stop") case err = <-secProvErr: - assert.NoError(t, err) + require.NoError(t, err) } }) go func() { diff --git a/tests/integration/suite/daprd/mtls/standalone/disable.go b/tests/integration/suite/daprd/mtls/standalone/disable.go index d1c205ff5cd..3825fea32ae 100644 --- a/tests/integration/suite/daprd/mtls/standalone/disable.go +++ b/tests/integration/suite/daprd/mtls/standalone/disable.go @@ -15,7 +15,6 @@ package standalone import ( "context" - "strconv" "testing" "time" @@ -52,14 +51,14 @@ func (e *disable) Setup(t *testing.T) []framework.Option { e.placement = procplacement.New(t, procplacement.WithEnableTLS(false), - procplacement.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), + procplacement.WithSentryAddress(e.sentry.Address()), ) e.daprd = procdaprd.New(t, procdaprd.WithAppID("my-app"), procdaprd.WithMode("standalone"), - procdaprd.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), - procdaprd.WithPlacementAddresses("localhost:"+strconv.Itoa(e.placement.Port())), + procdaprd.WithSentryAddress(e.sentry.Address()), + procdaprd.WithPlacementAddresses(e.placement.Address()), // Disable mTLS procdaprd.WithEnableMTLS(false), @@ -75,21 +74,21 @@ func (e *disable) Run(t *testing.T, ctx context.Context) { e.daprd.WaitUntilRunning(t, ctx) t.Run("trying plain text connection to Dapr API should succeed", func(t *testing.T) { - conn, err := grpc.DialContext(ctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), + conn, err := 
grpc.DialContext(ctx, e.daprd.InternalGRPCAddress(), grpc.WithReturnConnectionError(), grpc.WithTransportCredentials(insecure.NewCredentials()), ) require.NoError(t, err) conn.Connect() assert.Equal(t, connectivity.Ready, conn.GetState()) - assert.NoError(t, conn.Close()) + require.NoError(t, conn.Close()) }) t.Run("trying mTLS connection to Dapr API should fail", func(t *testing.T) { sctx, cancel := context.WithCancel(ctx) secProv, err := security.New(sctx, security.Options{ - SentryAddress: "localhost:" + strconv.Itoa(e.sentry.Port()), + SentryAddress: e.sentry.Address(), ControlPlaneTrustDomain: "localhost", ControlPlaneNamespace: "default", TrustAnchors: e.trustAnchors, @@ -109,7 +108,7 @@ func (e *disable) Run(t *testing.T, ctx context.Context) { case <-time.After(5 * time.Second): t.Fatal("timed out waiting for security provider to stop") case err = <-secProvErr: - assert.NoError(t, err) + require.NoError(t, err) } }) @@ -121,8 +120,8 @@ func (e *disable) Run(t *testing.T, ctx context.Context) { gctx, gcancel := context.WithTimeout(ctx, time.Second) t.Cleanup(gcancel) - _, err = grpc.DialContext(gctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), sec.GRPCDialOptionMTLS(myAppID), + _, err = grpc.DialContext(gctx, e.daprd.InternalGRPCAddress(), sec.GRPCDialOptionMTLS(myAppID), grpc.WithReturnConnectionError()) - assert.ErrorContains(t, err, "tls: first record does not look like a TLS handshake") + require.ErrorContains(t, err, "tls: first record does not look like a TLS handshake") }) } diff --git a/tests/integration/suite/daprd/mtls/standalone/enable.go b/tests/integration/suite/daprd/mtls/standalone/enable.go index bd219b729c5..f59a78d5f4e 100644 --- a/tests/integration/suite/daprd/mtls/standalone/enable.go +++ b/tests/integration/suite/daprd/mtls/standalone/enable.go @@ -17,7 +17,6 @@ import ( "context" "os" "path/filepath" - "strconv" "testing" "time" @@ -61,15 +60,15 @@ func (e *enable) Setup(t *testing.T) []framework.Option { e.placement = procplacement.New(t, procplacement.WithEnableTLS(true), procplacement.WithTrustAnchorsFile(taFile), - procplacement.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), + procplacement.WithSentryAddress(e.sentry.Address()), ) e.daprd = procdaprd.New(t, procdaprd.WithAppID("my-app"), procdaprd.WithMode("standalone"), procdaprd.WithExecOptions(exec.WithEnvVars("DAPR_TRUST_ANCHORS", string(bundle.TrustAnchors))), - procdaprd.WithSentryAddress("localhost:"+strconv.Itoa(e.sentry.Port())), - procdaprd.WithPlacementAddresses("localhost:"+strconv.Itoa(e.placement.Port())), + procdaprd.WithSentryAddress(e.sentry.Address()), + procdaprd.WithPlacementAddresses(e.placement.Address()), // Enable mTLS procdaprd.WithEnableMTLS(true), @@ -88,7 +87,7 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { t.Run("trying plain text connection to Dapr API should fail", func(t *testing.T) { gctx, gcancel := context.WithTimeout(ctx, time.Second) t.Cleanup(gcancel) - _, err := grpc.DialContext(gctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), + _, err := grpc.DialContext(gctx, e.daprd.InternalGRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithReturnConnectionError(), ) @@ -99,7 +98,7 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { sctx, cancel := context.WithCancel(ctx) secProv, err := security.New(sctx, security.Options{ - SentryAddress: "localhost:" + strconv.Itoa(e.sentry.Port()), + SentryAddress: e.sentry.Address(), ControlPlaneTrustDomain: "localhost", ControlPlaneNamespace: 
"default", TrustAnchors: e.trustAnchors, @@ -119,7 +118,7 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { case <-time.After(5 * time.Second): t.Fatal("timed out waiting for security provider to stop") case err = <-secProvErr: - assert.NoError(t, err) + require.NoError(t, err) } }) @@ -129,11 +128,11 @@ func (e *enable) Run(t *testing.T, ctx context.Context) { myAppID, err := spiffeid.FromSegments(spiffeid.RequireTrustDomainFromString("public"), "ns", "default", "my-app") require.NoError(t, err) - conn, err := grpc.DialContext(ctx, "localhost:"+strconv.Itoa(e.daprd.InternalGRPCPort()), sec.GRPCDialOptionMTLS(myAppID), + conn, err := grpc.DialContext(ctx, e.daprd.InternalGRPCAddress(), sec.GRPCDialOptionMTLS(myAppID), grpc.WithReturnConnectionError()) require.NoError(t, err) conn.Connect() assert.Equal(t, connectivity.Ready, conn.GetState()) - assert.NoError(t, conn.Close()) + require.NoError(t, conn.Close()) }) } diff --git a/tests/integration/suite/daprd/outbox/grpc/basic.go b/tests/integration/suite/daprd/outbox/grpc/basic.go index f60ecefd5c7..029066cdf32 100644 --- a/tests/integration/suite/daprd/outbox/grpc/basic.go +++ b/tests/integration/suite/daprd/outbox/grpc/basic.go @@ -15,7 +15,6 @@ package grpc import ( "context" - "fmt" "sync" "testing" "time" @@ -46,7 +45,7 @@ func (o *basic) Setup(t *testing.T) []framework.Option { onTopicEvent := func(ctx context.Context, in *runtimev1pb.TopicEventRequest) (*runtimev1pb.TopicEventResponse, error) { o.lock.Lock() defer o.lock.Unlock() - o.msg = in.Data + o.msg = in.GetData() return &runtimev1pb.TopicEventResponse{ Status: runtimev1pb.TopicEventResponse_SUCCESS, }, nil @@ -98,7 +97,7 @@ scopes: func (o *basic) Run(t *testing.T, ctx context.Context) { o.daprd.WaitUntilRunning(t, ctx) - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", o.daprd.GRPCPort()), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, o.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, conn.Close()) }) diff --git a/tests/integration/suite/daprd/outbox/http/basic.go b/tests/integration/suite/daprd/outbox/http/basic.go index 7e443fe8c8d..85efb70d054 100644 --- a/tests/integration/suite/daprd/outbox/http/basic.go +++ b/tests/integration/suite/daprd/outbox/http/basic.go @@ -171,6 +171,7 @@ func (o *basic) Run(t *testing.T, ctx context.Context) { var ce map[string]string err = json.Unmarshal(body, &ce) + //nolint:testifylint assert.NoError(c, err) assert.Equal(c, "2", ce["data"]) }, time.Second*10, time.Millisecond*100) diff --git a/tests/integration/suite/daprd/pluggable/basic.go b/tests/integration/suite/daprd/pluggable/basic.go index 8f1f2aea395..92b7db48092 100644 --- a/tests/integration/suite/daprd/pluggable/basic.go +++ b/tests/integration/suite/daprd/pluggable/basic.go @@ -122,13 +122,13 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { StoreName: "mystore", Key: "key2", }) require.NoError(t, err) - etag2 := resp.Etag + etag2 := resp.GetEtag() resp, err = client.GetState(ctx, &rtv1.GetStateRequest{ StoreName: "mystore", Key: "key4", }) require.NoError(t, err) - etag4 := resp.Etag + etag4 := resp.GetEtag() { resp, err := client.GetBulkState(ctx, &rtv1.GetBulkStateRequest{ @@ -136,28 +136,28 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { Keys: []string{"key1", "key2", "key3", "key4"}, }) require.NoError(t, err) - require.Len(t, resp.Items, 4) - 
assert.Equal(t, "key1", resp.Items[0].Key) - assert.Equal(t, "value1", string(resp.Items[0].Data)) - assert.Empty(t, resp.Items[0].Metadata) - - assert.Equal(t, "key2", resp.Items[1].Key) - assert.Equal(t, "value2", string(resp.Items[1].Data)) - assert.Equal(t, etag2, resp.Items[1].GetEtag()) - - assert.Equal(t, "key3", resp.Items[2].Key) - assert.Equal(t, "value3", string(resp.Items[2].Data)) - if assert.Contains(t, resp.Items[2].Metadata, "ttlExpireTime") { - expireTime, eerr := time.Parse(time.RFC3339, resp.Items[2].Metadata["ttlExpireTime"]) + require.Len(t, resp.GetItems(), 4) + assert.Equal(t, "key1", resp.GetItems()[0].GetKey()) + assert.Equal(t, "value1", string(resp.GetItems()[0].GetData())) + assert.Empty(t, resp.GetItems()[0].GetMetadata()) + + assert.Equal(t, "key2", resp.GetItems()[1].GetKey()) + assert.Equal(t, "value2", string(resp.GetItems()[1].GetData())) + assert.Equal(t, etag2, resp.GetItems()[1].GetEtag()) + + assert.Equal(t, "key3", resp.GetItems()[2].GetKey()) + assert.Equal(t, "value3", string(resp.GetItems()[2].GetData())) + if assert.Contains(t, resp.GetItems()[2].GetMetadata(), "ttlExpireTime") { + expireTime, eerr := time.Parse(time.RFC3339, resp.GetItems()[2].GetMetadata()["ttlExpireTime"]) require.NoError(t, eerr) assert.WithinDuration(t, now.Add(time.Second), expireTime, time.Second) } - assert.Equal(t, "key4", resp.Items[3].Key) - assert.Equal(t, "value4", string(resp.Items[3].Data)) - assert.Equal(t, etag4, resp.Items[3].GetEtag()) - if assert.Contains(t, resp.Items[3].Metadata, "ttlExpireTime") { - expireTime, eerr := time.Parse(time.RFC3339, resp.Items[3].Metadata["ttlExpireTime"]) + assert.Equal(t, "key4", resp.GetItems()[3].GetKey()) + assert.Equal(t, "value4", string(resp.GetItems()[3].GetData())) + assert.Equal(t, etag4, resp.GetItems()[3].GetEtag()) + if assert.Contains(t, resp.GetItems()[3].GetMetadata(), "ttlExpireTime") { + expireTime, eerr := time.Parse(time.RFC3339, resp.GetItems()[3].GetMetadata()["ttlExpireTime"]) require.NoError(t, eerr) assert.WithinDuration(t, now.Add(time.Second), expireTime, time.Second) } @@ -166,7 +166,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { StoreName: "mystore", Key: "key1", }) - assert.NoError(t, err) + require.NoError(t, err) _, err = client.DeleteBulkState(ctx, &rtv1.DeleteBulkStateRequest{ StoreName: "mystore", States: []*commonv1.StateItem{ @@ -175,7 +175,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) } assert.EventuallyWithT(t, func(c *assert.CollectT) { @@ -185,7 +185,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { Key: key, }) require.NoError(t, err) - assert.Empty(c, resp.Data) + assert.Empty(c, resp.GetData()) } }, time.Second*2, time.Millisecond*100) } diff --git a/tests/integration/suite/daprd/pubsub/grpc/compname.go b/tests/integration/suite/daprd/pubsub/grpc/compname.go index 072fbecdfe0..2399b9ac6f2 100644 --- a/tests/integration/suite/daprd/pubsub/grpc/compname.go +++ b/tests/integration/suite/daprd/pubsub/grpc/compname.go @@ -97,7 +97,7 @@ func (c *componentName) Run(t *testing.T, ctx context.Context) { pubsubName := c.pubsubNames[i] topicName := c.topicNames[i] pt.Add(func(col *assert.CollectT) { - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", c.daprd.GRPCPort()), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, c.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) 
require.NoError(col, err) t.Cleanup(func() { require.NoError(t, conn.Close()) }) @@ -107,6 +107,7 @@ func (c *componentName) Run(t *testing.T, ctx context.Context) { Topic: topicName, Data: []byte(`{"status": "completed"}`), }) + //nolint:testifylint assert.NoError(col, err) }) } diff --git a/tests/integration/suite/daprd/resiliency/apps/defaulttimeout.go b/tests/integration/suite/daprd/resiliency/apps/defaulttimeout.go index a239b8ff4a7..76bbac9b721 100644 --- a/tests/integration/suite/daprd/resiliency/apps/defaulttimeout.go +++ b/tests/integration/suite/daprd/resiliency/apps/defaulttimeout.go @@ -88,6 +88,6 @@ func (d *defaulttimeout) Run(t *testing.T, ctx context.Context) { assert.Equal(t, http.StatusOK, resp.StatusCode) body, err := io.ReadAll(resp.Body) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) assert.Equal(t, "GET", string(body)) } diff --git a/tests/integration/suite/daprd/resources/uniquename.go b/tests/integration/suite/daprd/resources/uniquename.go index 044a3978f27..eeff7bc69e3 100644 --- a/tests/integration/suite/daprd/resources/uniquename.go +++ b/tests/integration/suite/daprd/resources/uniquename.go @@ -20,7 +20,6 @@ import ( "path/filepath" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/dapr/dapr/tests/integration/framework" @@ -75,7 +74,7 @@ spec: daprd.WithExecOptions( exec.WithExitCode(1), exec.WithRunError(func(t *testing.T, err error) { - assert.ErrorContains(t, err, "exit status 1") + require.ErrorContains(t, err, "exit status 1") }), exec.WithStdout(u.logline.Stdout()), ), diff --git a/tests/integration/suite/daprd/serviceinvocation/grpc/basic.go b/tests/integration/suite/daprd/serviceinvocation/grpc/basic.go index 6b57cbfdb13..98b7e2c7fae 100644 --- a/tests/integration/suite/daprd/serviceinvocation/grpc/basic.go +++ b/tests/integration/suite/daprd/serviceinvocation/grpc/basic.go @@ -52,7 +52,7 @@ func (b *basic) Setup(t *testing.T) []framework.Option { case "foo": var verb int var data []byte - switch in.HttpExtension.Verb { + switch in.GetHttpExtension().GetVerb() { case commonv1.HTTPExtension_PATCH: data = []byte("PATCH") verb = http.StatusNoContent @@ -132,15 +132,15 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }) require.NoError(t, err) - return resp.Data.Value, resp.ContentType + return resp.GetData().GetValue(), resp.GetContentType() } for _, ts := range []struct { host string hostID string }{ - {host: fmt.Sprintf("localhost:%d", b.daprd1.GRPCPort()), hostID: b.daprd2.AppID()}, - {host: fmt.Sprintf("localhost:%d", b.daprd2.GRPCPort()), hostID: b.daprd1.AppID()}, + {host: b.daprd1.GRPCAddress(), hostID: b.daprd2.AppID()}, + {host: b.daprd2.GRPCAddress(), hostID: b.daprd1.AppID()}, } { t.Run(ts.host, func(t *testing.T) { body, contentType := doReq(ts.host, ts.hostID, commonv1.HTTPExtension_GET) @@ -167,7 +167,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }) t.Run("method doesn't exist", func(t *testing.T) { - host := fmt.Sprintf("localhost:%d", b.daprd1.GRPCPort()) + host := b.daprd1.GRPCAddress() conn, err := grpc.DialContext(ctx, host, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), @@ -190,7 +190,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }) t.Run("no method", func(t *testing.T) { - host := fmt.Sprintf("localhost:%d", b.daprd1.GRPCPort()) + host := b.daprd1.GRPCAddress() conn, err := grpc.DialContext(ctx, host, grpc.WithTransportCredentials(insecure.NewCredentials()), 
grpc.WithBlock(), @@ -213,7 +213,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }) t.Run("multiple segments", func(t *testing.T) { - host := fmt.Sprintf("localhost:%d", b.daprd1.GRPCPort()) + host := b.daprd1.GRPCAddress() conn, err := grpc.DialContext(ctx, host, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), @@ -229,14 +229,14 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }, }) require.NoError(t, err) - assert.Equal(t, "multiple/segments", string(resp.Data.Value)) - assert.Equal(t, "application/json", resp.ContentType) + assert.Equal(t, "multiple/segments", string(resp.GetData().GetValue())) + assert.Equal(t, "application/json", resp.GetContentType()) }) pt := util.NewParallel(t) for i := 0; i < 100; i++ { pt.Add(func(c *assert.CollectT) { - host := fmt.Sprintf("localhost:%d", b.daprd1.GRPCPort()) + host := b.daprd1.GRPCAddress() conn, err := grpc.DialContext(ctx, host, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) require.NoError(c, err) t.Cleanup(func() { require.NoError(c, conn.Close()) }) @@ -250,13 +250,13 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }) require.NoError(c, err) - assert.Equal(c, "POST", string(resp.Data.Value)) - assert.Equal(c, "201", resp.ContentType) + assert.Equal(c, "POST", string(resp.GetData().GetValue())) + assert.Equal(c, "201", resp.GetContentType()) }) } t.Run("type URL", func(t *testing.T) { - host := fmt.Sprintf("localhost:%d", b.daprd1.GRPCPort()) + host := b.daprd1.GRPCAddress() conn, err := grpc.DialContext(ctx, host, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), @@ -276,7 +276,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { }) require.NoError(t, err) require.NotNil(t, resp.GetData()) - assert.Equal(t, "emozioni di settembre/pfm", string(resp.Data.Value)) - assert.Equal(t, "mytype", resp.Data.TypeUrl) + assert.Equal(t, "emozioni di settembre/pfm", string(resp.GetData().GetValue())) + assert.Equal(t, "mytype", resp.GetData().GetTypeUrl()) }) } diff --git a/tests/integration/suite/daprd/serviceinvocation/grpc/fuzz.go b/tests/integration/suite/daprd/serviceinvocation/grpc/fuzz.go index 24714691f2d..aa9e281e07c 100644 --- a/tests/integration/suite/daprd/serviceinvocation/grpc/fuzz.go +++ b/tests/integration/suite/daprd/serviceinvocation/grpc/fuzz.go @@ -53,7 +53,7 @@ func (f *fuzzgrpc) Setup(t *testing.T) []framework.Option { onInvoke := func(ctx context.Context, in *commonv1.InvokeRequest) (*commonv1.InvokeResponse, error) { return &commonv1.InvokeResponse{ - Data: &anypb.Any{Value: in.Data.Value}, + Data: &anypb.Any{Value: in.GetData().GetValue()}, }, nil } @@ -110,7 +110,7 @@ func (f *fuzzgrpc) Run(t *testing.T, ctx context.Context) { if len(body) == 0 { body = nil } - assert.Equal(c, body, resp.Data.GetValue()) + assert.Equal(c, body, resp.GetData().GetValue()) }) } } diff --git a/tests/integration/suite/daprd/serviceinvocation/grpc/slowappstartup.go b/tests/integration/suite/daprd/serviceinvocation/grpc/slowappstartup.go index f20253c7795..fa09354463f 100644 --- a/tests/integration/suite/daprd/serviceinvocation/grpc/slowappstartup.go +++ b/tests/integration/suite/daprd/serviceinvocation/grpc/slowappstartup.go @@ -49,7 +49,7 @@ type slowappstartup struct { func (s *slowappstartup) Setup(t *testing.T) []framework.Option { onInvoke := func(ctx context.Context, in *commonv1.InvokeRequest) (*commonv1.InvokeResponse, error) { - assert.Equal(t, "Ping", in.Method) + assert.Equal(t, "Ping", in.GetMethod()) resp, err 
:= anypb.New(new(testpb.PingResponse)) if err != nil { return nil, err @@ -88,7 +88,7 @@ func (s *slowappstartup) Run(t *testing.T, ctx context.Context) { s.daprd.WaitUntilRunning(t, ctx) s.daprd.WaitUntilAppHealth(t, ctx) - conn, err := grpc.DialContext(ctx, "localhost:"+strconv.Itoa(s.daprd.GRPCPort()), + conn, err := grpc.DialContext(ctx, s.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), ) @@ -110,9 +110,10 @@ func (s *slowappstartup) Run(t *testing.T, ctx context.Context) { }) // This function must only return that the app is not in a healthy state // until the app is in a healthy state. + //nolint:testifylint if !assert.NoError(c, err) { require.ErrorContains(c, err, "app is not in a healthy state") } }, time.Second*3, time.Millisecond*100) - assert.NoError(t, resp.Data.UnmarshalTo(&pingResp)) + require.NoError(t, resp.GetData().UnmarshalTo(&pingResp)) } diff --git a/tests/integration/suite/daprd/serviceinvocation/http/basic.go b/tests/integration/suite/daprd/serviceinvocation/http/basic.go index b037db91567..81e3be272a1 100644 --- a/tests/integration/suite/daprd/serviceinvocation/http/basic.go +++ b/tests/integration/suite/daprd/serviceinvocation/http/basic.go @@ -168,7 +168,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { resp, err := util.HTTPClient(t).Do(req) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) }) t.Run("method doesn't exist", func(t *testing.T) { @@ -178,7 +178,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { resp, err := util.HTTPClient(t).Do(req) require.NoError(t, err) assert.Equal(t, http.StatusNotFound, resp.StatusCode) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) }) t.Run("no method", func(t *testing.T) { @@ -188,7 +188,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { resp, err := util.HTTPClient(t).Do(req) require.NoError(t, err) assert.Equal(t, http.StatusNotFound, resp.StatusCode) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) reqURL = fmt.Sprintf("http://localhost:%d/", b.daprd1.HTTPPort()) req, err = http.NewRequestWithContext(ctx, http.MethodPost, reqURL, nil) @@ -197,7 +197,7 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { resp, err = util.HTTPClient(t).Do(req) require.NoError(t, err) assert.Equal(t, http.StatusNotFound, resp.StatusCode) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) }) t.Run("multiple segments", func(t *testing.T) { @@ -211,7 +211,6 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { require.NoError(t, err) require.NoError(t, resp.Body.Close()) assert.Equal(t, "ok", string(body)) - assert.NoError(t, resp.Body.Close()) }) client := util.HTTPClient(t) @@ -230,7 +229,6 @@ func (b *basic) Run(t *testing.T, ctx context.Context) { require.NoError(t, resp.Body.Close()) assert.Equal(t, "POST", resp.Header.Get("x-method")) assert.Equal(t, u, string(body)) - assert.NoError(t, resp.Body.Close()) }) } } diff --git a/tests/integration/suite/daprd/state/grpc/basic.go b/tests/integration/suite/daprd/state/grpc/basic.go index eb1852f55d6..962b54dfe4e 100644 --- a/tests/integration/suite/daprd/state/grpc/basic.go +++ b/tests/integration/suite/daprd/state/grpc/basic.go @@ -15,7 +15,6 @@ package grpc import ( "context" - "fmt" "testing" "github.com/stretchr/testify/require" @@ -48,7 +47,7 @@ func (b *basic) Setup(t *testing.T) []framework.Option { func 
(b *basic) Run(t *testing.T, ctx context.Context) { b.daprd.WaitUntilRunning(t, ctx) - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", b.daprd.GRPCPort()), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, b.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, conn.Close()) }) client := rtv1.NewDaprClient(conn) diff --git a/tests/integration/suite/daprd/state/grpc/compname.go b/tests/integration/suite/daprd/state/grpc/compname.go index 933b6966dc2..1383a3dea4f 100644 --- a/tests/integration/suite/daprd/state/grpc/compname.go +++ b/tests/integration/suite/daprd/state/grpc/compname.go @@ -94,7 +94,7 @@ func (c *componentName) Run(t *testing.T, ctx context.Context) { for _, storeName := range c.storeNames { storeName := storeName pt.Add(func(col *assert.CollectT) { - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", c.daprd.GRPCPort()), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, c.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) require.NoError(col, err) t.Cleanup(func() { require.NoError(t, conn.Close()) }) @@ -123,14 +123,14 @@ func (c *componentName) Run(t *testing.T, ctx context.Context) { Key: "key1", }) require.NoError(col, err) - assert.Equal(col, "value1", string(resp.Data)) + assert.Equal(col, "value1", string(resp.GetData())) resp, err = client.GetState(ctx, &rtv1.GetStateRequest{ StoreName: storeName, Key: "key2", }) require.NoError(col, err) - assert.Equal(col, "value2", string(resp.Data)) + assert.Equal(col, "value2", string(resp.GetData())) }) } } diff --git a/tests/integration/suite/daprd/state/grpc/fuzz.go b/tests/integration/suite/daprd/state/grpc/fuzz.go index 4f3cd186692..29ae14edbc9 100644 --- a/tests/integration/suite/daprd/state/grpc/fuzz.go +++ b/tests/integration/suite/daprd/state/grpc/fuzz.go @@ -169,7 +169,7 @@ spec: func (f *fuzzstate) Run(t *testing.T, ctx context.Context) { f.daprd.WaitUntilRunning(t, ctx) - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", f.daprd.GRPCPort()), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, f.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, conn.Close()) }) client := rtv1.NewDaprClient(conn) @@ -184,7 +184,7 @@ func (f *fuzzstate) Run(t *testing.T, ctx context.Context) { Key: f.getFuzzKeys[i], }) require.NoError(t, err) - assert.Empty(t, resp.Data, "key: %s", f.getFuzzKeys[i]) + assert.Empty(t, resp.GetData(), "key: %s", f.getFuzzKeys[i]) }) } }) @@ -219,22 +219,22 @@ func (f *fuzzstate) Run(t *testing.T, ctx context.Context) { for _, s := range append(f.saveReqStrings[i], f.saveReqBinaries[i]...) 
{ resp, err := client.GetState(ctx, &rtv1.GetStateRequest{ StoreName: f.storeName, - Key: s.Key, + Key: s.GetKey(), }) require.NoError(t, err) - assert.Equalf(t, s.Value, resp.Data, "orig=%s got=%s", s.Value, resp.Data) + assert.Equalf(t, s.GetValue(), resp.GetData(), "orig=%s got=%s", s.GetValue(), resp.GetData()) } for _, s := range f.saveReqBinariesHTTP[i] { resp, err := client.GetState(ctx, &rtv1.GetStateRequest{ StoreName: f.storeName, - Key: s.Key, + Key: s.GetKey(), }) require.NoError(t, err) // TODO: Even though we are getting gRPC, the binary data was stored // with HTTP, so it was base64 encoded. - val := `"` + base64.StdEncoding.EncodeToString(s.Value) + `"` - assert.Equalf(t, val, string(resp.Data), "orig=%s got=%s", val, resp.Data) + val := `"` + base64.StdEncoding.EncodeToString(s.GetValue()) + `"` + assert.Equalf(t, val, string(resp.GetData()), "orig=%s got=%s", val, resp.GetData()) } }) diff --git a/tests/integration/suite/daprd/state/grpc/ttl.go b/tests/integration/suite/daprd/state/grpc/ttl.go index edd0c5f76a2..64985b6d878 100644 --- a/tests/integration/suite/daprd/state/grpc/ttl.go +++ b/tests/integration/suite/daprd/state/grpc/ttl.go @@ -15,7 +15,6 @@ package grpc import ( "context" - "fmt" "testing" "time" @@ -50,7 +49,7 @@ func (l *ttl) Setup(t *testing.T) []framework.Option { func (l *ttl) Run(t *testing.T, ctx context.Context) { l.daprd.WaitUntilRunning(t, ctx) - conn, err := grpc.DialContext(ctx, fmt.Sprintf("localhost:%d", l.daprd.GRPCPort()), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) + conn, err := grpc.DialContext(ctx, l.daprd.GRPCAddress(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, conn.Close()) }) client := rtv1.NewDaprClient(conn) @@ -69,7 +68,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { StoreName: "mystore", Key: "key1", }) require.NoError(t, err) - assert.Equal(t, "value1", string(resp.Data)) + assert.Equal(t, "value1", string(resp.GetData())) ttlExpireTime, err := time.Parse(time.RFC3339, resp.GetMetadata()["ttlExpireTime"]) require.NoError(t, err) assert.InDelta(t, now.Add(3*time.Second).Unix(), ttlExpireTime.Unix(), 1) @@ -81,7 +80,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { StoreName: "mystore", Key: "key1", }) require.NoError(c, err) - assert.Empty(c, resp.Data) + assert.Empty(c, resp.GetData()) }, 5*time.Second, 100*time.Millisecond) }) } diff --git a/tests/integration/suite/daprd/state/http/ttl.go b/tests/integration/suite/daprd/state/http/ttl.go index 281d7165591..98c4748e525 100644 --- a/tests/integration/suite/daprd/state/http/ttl.go +++ b/tests/integration/suite/daprd/state/http/ttl.go @@ -62,7 +62,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { require.NoError(t, err) resp, err := client.Do(req) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) assert.Equal(t, http.StatusNoContent, resp.StatusCode) }) @@ -74,7 +74,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { assert.Equal(t, http.StatusOK, resp.StatusCode) body, err := io.ReadAll(resp.Body) require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) assert.Equal(t, `"value1"`, string(body)) ttlExpireTimeStr := resp.Header.Get("metadata.ttlExpireTime") @@ -90,7 +90,7 @@ func (l *ttl) Run(t *testing.T, ctx context.Context) { require.NoError(c, err) resp, err := client.Do(req) require.NoError(c, err) - assert.NoError(t, 
resp.Body.Close()) + require.NoError(t, resp.Body.Close()) assert.Equal(c, http.StatusNoContent, resp.StatusCode) }, 5*time.Second, 100*time.Millisecond) }) diff --git a/tests/integration/suite/placement/apilevel/shared.go b/tests/integration/suite/placement/apilevel/shared.go index c30b0aebeef..6bf1c8f24a7 100644 --- a/tests/integration/suite/placement/apilevel/shared.go +++ b/tests/integration/suite/placement/apilevel/shared.go @@ -152,11 +152,9 @@ func registerHostFailing(t *testing.T, ctx context.Context, conn *grpc.ClientCon } // Checks the API level reported in the state tables matched. -func checkAPILevelInState(t assert.TestingT, client *http.Client, port int, expectAPILevel int) (tableVersion int) { +func checkAPILevelInState(t require.TestingT, client *http.Client, port int, expectAPILevel int) (tableVersion int) { res, err := client.Get(fmt.Sprintf("http://localhost:%d/placement/state", port)) - if !assert.NoError(t, err) { - return - } + require.NoError(t, err) defer res.Body.Close() stateRes := struct { @@ -164,9 +162,7 @@ func checkAPILevelInState(t assert.TestingT, client *http.Client, port int, expe TableVersion int `json:"tableVersion"` }{} err = json.NewDecoder(res.Body).Decode(&stateRes) - if !assert.NoError(t, err) { - return - } + require.NoError(t, err) assert.Equal(t, expectAPILevel, stateRes.APILevel) diff --git a/tests/integration/suite/placement/authz/mtls.go b/tests/integration/suite/placement/authz/mtls.go index 9cef0c7a8c5..2e289c702cf 100644 --- a/tests/integration/suite/placement/authz/mtls.go +++ b/tests/integration/suite/placement/authz/mtls.go @@ -17,7 +17,6 @@ import ( "context" "os" "path/filepath" - "strconv" "testing" "time" @@ -53,7 +52,7 @@ func (m *mtls) Setup(t *testing.T) []framework.Option { require.NoError(t, os.WriteFile(taFile, m.sentry.CABundle().TrustAnchors, 0o600)) m.place = placement.New(t, placement.WithEnableTLS(true), - placement.WithSentryAddress("localhost:"+strconv.Itoa(m.sentry.Port())), + placement.WithSentryAddress(m.sentry.Address()), placement.WithTrustAnchorsFile(taFile), ) @@ -67,7 +66,7 @@ func (m *mtls) Run(t *testing.T, ctx context.Context) { m.place.WaitUntilRunning(t, ctx) secProv, err := security.New(ctx, security.Options{ - SentryAddress: "localhost:" + strconv.Itoa(m.sentry.Port()), + SentryAddress: m.sentry.Address(), ControlPlaneTrustDomain: "localhost", ControlPlaneNamespace: "default", TrustAnchors: m.sentry.CABundle().TrustAnchors, @@ -90,7 +89,7 @@ func (m *mtls) Run(t *testing.T, ctx context.Context) { placeID, err := spiffeid.FromSegments(sec.ControlPlaneTrustDomain(), "ns", "default", "dapr-placement") require.NoError(t, err) - host := "localhost:" + strconv.Itoa(m.place.Port()) + host := m.place.Address() conn, err := grpc.DialContext(ctx, host, grpc.WithBlock(), sec.GRPCDialOptionMTLS(placeID)) require.NoError(t, err) @@ -99,15 +98,15 @@ func (m *mtls) Run(t *testing.T, ctx context.Context) { // Can only create hosts where the app ID match. 
stream := establishStream(t, ctx, client) - assert.NoError(t, stream.Send(&v1pb.Host{ + require.NoError(t, stream.Send(&v1pb.Host{ Id: "app-1", })) waitForUnlock(t, stream) _, err = stream.Recv() - assert.NoError(t, err) + require.NoError(t, err) stream = establishStream(t, ctx, client) - assert.NoError(t, stream.Send(&v1pb.Host{ + require.NoError(t, stream.Send(&v1pb.Host{ Id: "app-2", })) waitForUnlock(t, stream) @@ -120,8 +119,9 @@ func waitForUnlock(t *testing.T, stream v1pb.Placement_ReportDaprStatusClient) { t.Helper() assert.EventuallyWithT(t, func(c *assert.CollectT) { resp, err := stream.Recv() + //nolint:testifylint if assert.NoError(c, err) { - assert.Equal(c, "unlock", resp.Operation) + assert.Equal(c, "unlock", resp.GetOperation()) } }, time.Second*5, time.Millisecond*100) } @@ -132,13 +132,16 @@ func establishStream(t *testing.T, ctx context.Context, client v1pb.PlacementCli assert.EventuallyWithT(t, func(c *assert.CollectT) { var err error stream, err = client.ReportDaprStatus(ctx) + //nolint:testifylint if !assert.NoError(c, err) { return } + //nolint:testifylint if assert.NoError(c, stream.Send(&v1pb.Host{ Id: "app-1", })) { _, err = stream.Recv() + //nolint:testifylint assert.NoError(c, err) } }, time.Second*5, time.Millisecond*100) diff --git a/tests/integration/suite/placement/authz/nomtls.go b/tests/integration/suite/placement/authz/nomtls.go index cec834f2c2e..df3d4e9ad16 100644 --- a/tests/integration/suite/placement/authz/nomtls.go +++ b/tests/integration/suite/placement/authz/nomtls.go @@ -15,10 +15,8 @@ package authz import ( "context" - "strconv" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" @@ -49,7 +47,7 @@ func (n *nomtls) Setup(t *testing.T) []framework.Option { func (n *nomtls) Run(t *testing.T, ctx context.Context) { n.place.WaitUntilRunning(t, ctx) - host := "localhost:" + strconv.Itoa(n.place.Port()) + host := n.place.Address() conn, err := grpc.DialContext(ctx, host, grpc.WithBlock(), grpc.WithReturnConnectionError(), grpc.WithTransportCredentials(grpcinsecure.NewCredentials()), ) @@ -60,14 +58,14 @@ func (n *nomtls) Run(t *testing.T, ctx context.Context) { // Can create hosts with any appIDs or namespaces. 
stream := establishStream(t, ctx, client) - assert.NoError(t, stream.Send(new(v1pb.Host))) + require.NoError(t, stream.Send(new(v1pb.Host))) waitForUnlock(t, stream) _, err = stream.Recv() - assert.NoError(t, err) + require.NoError(t, err) stream = establishStream(t, ctx, client) - assert.NoError(t, stream.Send(&v1pb.Host{Name: "bar"})) + require.NoError(t, stream.Send(&v1pb.Host{Name: "bar"})) waitForUnlock(t, stream) _, err = stream.Recv() - assert.NoError(t, err) + require.NoError(t, err) } diff --git a/tests/integration/suite/placement/quorum/insecure.go b/tests/integration/suite/placement/quorum/insecure.go index 9a30b7a263a..65738568210 100644 --- a/tests/integration/suite/placement/quorum/insecure.go +++ b/tests/integration/suite/placement/quorum/insecure.go @@ -59,7 +59,7 @@ func (i *insecure) Setup(t *testing.T) []framework.Option { placement.WithInitialClusterPorts(fp.Port(t, 0), fp.Port(t, 1), fp.Port(t, 2)), placement.WithEnableTLS(true), placement.WithTrustAnchorsFile(taFile), - placement.WithSentryAddress("localhost:" + strconv.Itoa(i.sentry.Port())), + placement.WithSentryAddress(i.sentry.Address()), } i.places = []*placement.Placement{ placement.New(t, append(opts, placement.WithID("p1"))...), @@ -80,7 +80,7 @@ func (i *insecure) Run(t *testing.T, ctx context.Context) { i.places[2].WaitUntilRunning(t, ctx) secProv, err := security.New(ctx, security.Options{ - SentryAddress: "localhost:" + strconv.Itoa(i.sentry.Port()), + SentryAddress: i.sentry.Address(), ControlPlaneTrustDomain: "localhost", ControlPlaneNamespace: "default", TrustAnchors: i.sentry.CABundle().TrustAnchors, @@ -148,11 +148,11 @@ func (i *insecure) Run(t *testing.T, ctx context.Context) { assert.EventuallyWithT(t, func(c *assert.CollectT) { o, err := stream.Recv() require.NoError(t, err) - assert.Equal(c, "update", o.Operation) + assert.Equal(c, "update", o.GetOperation()) if assert.NotNil(c, o.GetTables()) { - assert.Len(c, o.GetTables().Entries, 2) - assert.Contains(c, o.GetTables().Entries, "entity-1") - assert.Contains(c, o.GetTables().Entries, "entity-2") + assert.Len(c, o.GetTables().GetEntries(), 2) + assert.Contains(c, o.GetTables().GetEntries(), "entity-1") + assert.Contains(c, o.GetTables().GetEntries(), "entity-2") } }, time.Second*20, time.Millisecond*100) } diff --git a/tests/integration/suite/placement/quorum/jwks.go b/tests/integration/suite/placement/quorum/jwks.go index 55830ff435c..64da8324941 100644 --- a/tests/integration/suite/placement/quorum/jwks.go +++ b/tests/integration/suite/placement/quorum/jwks.go @@ -99,7 +99,7 @@ spec: placement.WithInitialClusterPorts(fp.Port(t, 0), fp.Port(t, 1), fp.Port(t, 2)), placement.WithEnableTLS(true), placement.WithTrustAnchorsFile(taFile), - placement.WithSentryAddress("localhost:" + strconv.Itoa(j.sentry.Port())), + placement.WithSentryAddress(j.sentry.Address()), } j.places = []*placement.Placement{ placement.New(t, append(opts, placement.WithID("p1"), @@ -125,7 +125,7 @@ func (j *jwks) Run(t *testing.T, ctx context.Context) { t.Setenv("DAPR_SENTRY_TOKEN_FILE", j.appTokenFile) secProv, err := security.New(ctx, security.Options{ - SentryAddress: "localhost:" + strconv.Itoa(j.sentry.Port()), + SentryAddress: j.sentry.Address(), ControlPlaneTrustDomain: "localhost", ControlPlaneNamespace: "default", TrustAnchors: j.sentry.CABundle().TrustAnchors, @@ -156,7 +156,7 @@ func (j *jwks) Run(t *testing.T, ctx context.Context) { if i >= 3 { i = 0 } - host := "localhost:" + strconv.Itoa(j.places[i].Port()) + host := j.places[i].Address() conn, cerr := 
grpc.DialContext(ctx, host, grpc.WithBlock(), grpc.WithReturnConnectionError(), sec.GRPCDialOptionMTLS(placeID), ) @@ -193,11 +193,11 @@ func (j *jwks) Run(t *testing.T, ctx context.Context) { assert.EventuallyWithT(t, func(c *assert.CollectT) { o, err := stream.Recv() require.NoError(t, err) - assert.Equal(c, "update", o.Operation) + assert.Equal(c, "update", o.GetOperation()) if assert.NotNil(c, o.GetTables()) { - assert.Len(c, o.GetTables().Entries, 2) - assert.Contains(c, o.GetTables().Entries, "entity-1") - assert.Contains(c, o.GetTables().Entries, "entity-2") + assert.Len(c, o.GetTables().GetEntries(), 2) + assert.Contains(c, o.GetTables().GetEntries(), "entity-1") + assert.Contains(c, o.GetTables().GetEntries(), "entity-2") } }, time.Second*20, time.Millisecond*100) } diff --git a/tests/integration/suite/placement/quorum/notls.go b/tests/integration/suite/placement/quorum/notls.go index cfbb9c9b124..97009a6a43c 100644 --- a/tests/integration/suite/placement/quorum/notls.go +++ b/tests/integration/suite/placement/quorum/notls.go @@ -16,7 +16,6 @@ package quorum import ( "context" "fmt" - "strconv" "testing" "time" @@ -72,7 +71,7 @@ func (n *notls) Run(t *testing.T, ctx context.Context) { if j >= 3 { j = 0 } - host := "localhost:" + strconv.Itoa(n.places[j].Port()) + host := n.places[j].Address() conn, err := grpc.DialContext(ctx, host, grpc.WithBlock(), grpc.WithReturnConnectionError(), grpc.WithTransportCredentials(grpcinsecure.NewCredentials()), ) @@ -109,11 +108,11 @@ func (n *notls) Run(t *testing.T, ctx context.Context) { assert.EventuallyWithT(t, func(c *assert.CollectT) { o, err := stream.Recv() require.NoError(t, err) - assert.Equal(c, "update", o.Operation) + assert.Equal(c, "update", o.GetOperation()) if assert.NotNil(c, o.GetTables()) { - assert.Len(c, o.GetTables().Entries, 2) - assert.Contains(c, o.GetTables().Entries, "entity-1") - assert.Contains(c, o.GetTables().Entries, "entity-2") + assert.Len(c, o.GetTables().GetEntries(), 2) + assert.Contains(c, o.GetTables().GetEntries(), "entity-1") + assert.Contains(c, o.GetTables().GetEntries(), "entity-2") } }, time.Second*20, time.Millisecond*100) } diff --git a/tests/integration/suite/ports/operator.go b/tests/integration/suite/ports/operator.go index fd249042c1f..81d438564e6 100644 --- a/tests/integration/suite/ports/operator.go +++ b/tests/integration/suite/ports/operator.go @@ -19,7 +19,6 @@ import ( "net" "os" "path/filepath" - "strconv" "testing" "time" @@ -58,7 +57,7 @@ func (o *operator) Setup(t *testing.T) []framework.Option { Spec: configapi.ConfigurationSpec{ MTLSSpec: &configapi.MTLSSpec{ ControlPlaneTrustDomain: "integration.test.dapr.io", - SentryAddress: "localhost:" + strconv.Itoa(sentry.Port()), + SentryAddress: sentry.Address(), }, }, }), @@ -91,6 +90,7 @@ func (o *operator) Run(t *testing.T, ctx context.Context) { } { assert.EventuallyWithT(t, func(t *assert.CollectT) { conn, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("localhost:%d", port)) + //nolint:testifylint _ = assert.NoError(t, err) && assert.NoError(t, conn.Close()) }, time.Second*5, 100*time.Millisecond, "port %s (:%d) was not available in time", name, port) } diff --git a/tests/integration/suite/sentry/metrics/expiry.go b/tests/integration/suite/sentry/metrics/expiry.go index 5f6f9f334b2..99ed9baf19f 100644 --- a/tests/integration/suite/sentry/metrics/expiry.go +++ b/tests/integration/suite/sentry/metrics/expiry.go @@ -76,7 +76,7 @@ func (e *expiry) Run(t *testing.T, ctx context.Context) { respBody, err := io.ReadAll(resp.Body) 
require.NoError(t, err) - assert.NoError(t, resp.Body.Close()) + require.NoError(t, resp.Body.Close()) for _, line := range bytes.Split(respBody, []byte("\n")) { if len(line) == 0 || line[0] == '#' { diff --git a/tests/integration/suite/sentry/validator/insecure/insecure.go b/tests/integration/suite/sentry/validator/insecure/insecure.go index 032515c1a32..53152d78b41 100644 --- a/tests/integration/suite/sentry/validator/insecure/insecure.go +++ b/tests/integration/suite/sentry/validator/insecure/insecure.go @@ -100,7 +100,7 @@ func (m *insecure) Run(t *testing.T, parentCtx context.Context) { TokenValidator: sentrypbv1.SignCertificateRequest_INSECURE, }) require.NoError(t, err) - require.NotEmpty(t, res.WorkloadCertificate) + require.NotEmpty(t, res.GetWorkloadCertificate()) validateCertificateResponse(t, res, m.proc.CABundle(), defaultAppSPIFFEID, defaultAppDNSName) }) @@ -115,7 +115,7 @@ func (m *insecure) Run(t *testing.T, parentCtx context.Context) { // TokenValidator: sentrypbv1.SignCertificateRequest_INSECURE, }) require.NoError(t, err) - require.NotEmpty(t, res.WorkloadCertificate) + require.NotEmpty(t, res.GetWorkloadCertificate()) validateCertificateResponse(t, res, m.proc.CABundle(), defaultAppSPIFFEID, defaultAppDNSName) }) @@ -175,9 +175,9 @@ func (m *insecure) Run(t *testing.T, parentCtx context.Context) { func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateResponse, sentryBundle ca.Bundle, expectSPIFFEID, expectDNSName string) { t.Helper() - require.NotEmpty(t, res.WorkloadCertificate) + require.NotEmpty(t, res.GetWorkloadCertificate()) - rest := res.WorkloadCertificate + rest := res.GetWorkloadCertificate() // First block should contain the issued workload certificate var block *pem.Block diff --git a/tests/integration/suite/sentry/validator/jwks/jwks.go b/tests/integration/suite/sentry/validator/jwks/jwks.go index 4f17543ddf4..e3218ce3445 100644 --- a/tests/integration/suite/sentry/validator/jwks/jwks.go +++ b/tests/integration/suite/sentry/validator/jwks/jwks.go @@ -153,7 +153,7 @@ func (m *jwks) Run(t *testing.T, parentCtx context.Context) { Token: string(token), }) require.NoError(t, err) - require.NotEmpty(t, res.WorkloadCertificate) + require.NotEmpty(t, res.GetWorkloadCertificate()) validateCertificateResponse(t, res, m.proc.CABundle(), defaultAppSPIFFEID, defaultAppDNSName) }) diff --git a/tests/integration/suite/sentry/validator/jwks/utils.go b/tests/integration/suite/sentry/validator/jwks/utils.go index 914c85ab220..3621922be18 100644 --- a/tests/integration/suite/sentry/validator/jwks/utils.go +++ b/tests/integration/suite/sentry/validator/jwks/utils.go @@ -108,9 +108,9 @@ func signJWT(builder *jwt.Builder) ([]byte, error) { func validateCertificateResponse(t *testing.T, res *sentrypbv1.SignCertificateResponse, sentryBundle ca.Bundle, expectSPIFFEID, expectDNSName string) { t.Helper() - require.NotEmpty(t, res.WorkloadCertificate) + require.NotEmpty(t, res.GetWorkloadCertificate()) - rest := res.WorkloadCertificate + rest := res.GetWorkloadCertificate() // First block should contain the issued workload certificate { diff --git a/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go b/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go index b1a6f39a4b9..5103793469e 100644 --- a/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go +++ b/tests/integration/suite/sentry/validator/kubernetes/kubernetes.go @@ -88,18 +88,18 @@ func (k *kubernetes) Run(t *testing.T, ctx context.Context) { Token: 
`{"kubernetes.io":{"pod":{"name":"mypod"}}}`, }) require.NoError(t, err) - require.NotEmpty(t, resp.WorkloadCertificate) + require.NotEmpty(t, resp.GetWorkloadCertificate()) - certs, err := secpem.DecodePEMCertificates(resp.WorkloadCertificate) + certs, err := secpem.DecodePEMCertificates(resp.GetWorkloadCertificate()) require.NoError(t, err) require.Len(t, certs, 2) - assert.NoError(t, certs[0].CheckSignatureFrom(certs[1])) + require.NoError(t, certs[0].CheckSignatureFrom(certs[1])) require.Len(t, k.sentry.CABundle().IssChain, 1) assert.Equal(t, k.sentry.CABundle().IssChain[0].Raw, certs[1].Raw) trustBundle, err := secpem.DecodePEMCertificates(k.sentry.CABundle().TrustAnchors) require.NoError(t, err) require.Len(t, trustBundle, 1) - assert.NoError(t, certs[1].CheckSignatureFrom(trustBundle[0])) + require.NoError(t, certs[1].CheckSignatureFrom(trustBundle[0])) for _, req := range map[string]*sentrypbv1.SignCertificateRequest{ "wrong app id": { diff --git a/tests/integration/suite/sentry/validator/kubernetes/longname.go b/tests/integration/suite/sentry/validator/kubernetes/longname.go index a0830788862..f50c33d4a0c 100644 --- a/tests/integration/suite/sentry/validator/kubernetes/longname.go +++ b/tests/integration/suite/sentry/validator/kubernetes/longname.go @@ -88,5 +88,5 @@ func (l *longname) Run(t *testing.T, ctx context.Context) { Token: `{"kubernetes.io":{"pod":{"name":"mypod"}}}`, }) require.NoError(t, err) - require.NotEmpty(t, resp.WorkloadCertificate) + require.NotEmpty(t, resp.GetWorkloadCertificate()) } diff --git a/tests/perf/test_params_test.go b/tests/perf/test_params_test.go index 7e62bb8a0d7..2b8d7fa17fb 100644 --- a/tests/perf/test_params_test.go +++ b/tests/perf/test_params_test.go @@ -25,11 +25,11 @@ func TestParamsOpts(t *testing.T) { t.Run("default params should be used when env vars and params are absent", func(t *testing.T) { p := Params() - assert.Equal(t, p.ClientConnections, defaultClientConnections) - assert.Equal(t, p.Payload, defaultPayload) - assert.Equal(t, p.PayloadSizeKB, defaultPayloadSizeKB) - assert.Equal(t, p.QPS, defaultQPS) - assert.Equal(t, p.TestDuration, defaultTestDuration) + assert.Equal(t, defaultClientConnections, p.ClientConnections) + assert.Equal(t, defaultPayload, p.Payload) + assert.Equal(t, defaultPayloadSizeKB, p.PayloadSizeKB) + assert.Equal(t, defaultQPS, p.QPS) + assert.Equal(t, defaultTestDuration, p.TestDuration) }) t.Run("manually-set params should be used when specified", func(t *testing.T) { clientConnections := defaultClientConnections + 1 diff --git a/tests/platforms/kubernetes/appmanager_test.go b/tests/platforms/kubernetes/appmanager_test.go index 4a32bb2cceb..ea56f9287f3 100644 --- a/tests/platforms/kubernetes/appmanager_test.go +++ b/tests/platforms/kubernetes/appmanager_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -70,7 +71,7 @@ func TestDeployApp(t *testing.T) { // act _, err := appManager.Deploy() - assert.NoError(t, err) + require.NoError(t, err) // assert deploymentClient := client.Deployments(testNamespace) @@ -126,12 +127,12 @@ func TestWaitUntilDeploymentState(t *testing.T) { // act _, err := appManager.Deploy() - assert.NoError(t, err) + require.NoError(t, err) // assert d, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDone) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 
testApp.Replicas, d.Status.ReadyReplicas) assert.Equal(t, expectedGetVerbCalled, getVerbCalled) }) @@ -179,12 +180,12 @@ func TestWaitUntilDeploymentState(t *testing.T) { // act _, err := appManager.Deploy() - assert.NoError(t, err) + require.NoError(t, err) // assert d, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDeleted) - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, d) assert.Equal(t, expectedGetVerbCalled, getVerbCalled) }) @@ -228,22 +229,22 @@ func TestScaleDeploymentReplica(t *testing.T) { t.Run("lower bound check", func(t *testing.T) { err := appManager.ScaleDeploymentReplica(-1) - assert.Error(t, err) + require.Error(t, err) }) t.Run("upper bound check", func(t *testing.T) { err := appManager.ScaleDeploymentReplica(maxReplicas + 1) - assert.Error(t, err) + require.Error(t, err) }) t.Run("same replicas", func(t *testing.T) { err := appManager.ScaleDeploymentReplica(1) - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("new replicas", func(t *testing.T) { err := appManager.ScaleDeploymentReplica(3) - assert.NoError(t, err) + require.NoError(t, err) }) } @@ -294,7 +295,7 @@ func TestValidateSidecar(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) err := appManager.ValidateSidecar() - assert.NoError(t, err) + require.NoError(t, err) }) t.Run("Sidecar is not injected", func(t *testing.T) { @@ -328,7 +329,7 @@ func TestValidateSidecar(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) err := appManager.ValidateSidecar() - assert.Error(t, err) + require.Error(t, err) }) t.Run("Pod is not found", func(t *testing.T) { @@ -350,7 +351,7 @@ func TestValidateSidecar(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) err := appManager.ValidateSidecar() - assert.Error(t, err) + require.Error(t, err) }) } @@ -363,7 +364,7 @@ func TestCreateIngressService(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) _, err := appManager.CreateIngressService() - assert.NoError(t, err) + require.NoError(t, err) // assert serviceClient := client.Services(testNamespace) obj, _ := serviceClient.Get(context.TODO(), testApp.AppName, metav1.GetOptions{}) @@ -379,7 +380,7 @@ func TestCreateIngressService(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) _, err := appManager.CreateIngressService() - assert.NoError(t, err) + require.NoError(t, err) // assert serviceClient := client.Services(testNamespace) obj, _ := serviceClient.Get(context.TODO(), testApp.AppName, metav1.GetOptions{}) @@ -422,7 +423,7 @@ func TestWaitUntilServiceStateAndGetExternalURL(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) svcObj, err := appManager.WaitUntilServiceState(appManager.app.AppName, appManager.IsServiceIngressReady) - assert.NoError(t, err) + require.NoError(t, err) externalURL := appManager.AcquireExternalURLFromService(svcObj) assert.Equal(t, externalURL, fmt.Sprintf("%s:%d", fakeMinikubeNodeIP, fakeNodePort)) @@ -473,7 +474,7 @@ func TestWaitUntilServiceStateAndGetExternalURL(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) svcObj, err := appManager.WaitUntilServiceState(appManager.app.AppName, appManager.IsServiceIngressReady) - assert.NoError(t, err) + require.NoError(t, err) externalURL := appManager.AcquireExternalURLFromService(svcObj) assert.Equal(t, fmt.Sprintf("%s:%d", fakeExternalIP, fakeNodePort), externalURL) @@ -504,7 +505,7 @@ func TestWaitUntilServiceStateDeleted(t *testing.T) { 
appManager := NewAppManager(client, testNamespace, testApp) svcObj, err := appManager.WaitUntilServiceState(appManager.app.AppName, appManager.IsServiceDeleted) - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, svcObj) } @@ -546,7 +547,7 @@ func TestDeleteDeployment(t *testing.T) { client.ClientSet.(*fake.Clientset).AddReactor("delete", "deployments", tt.actionFunc) appManager := NewAppManager(client, testNamespace, testApp) err := appManager.DeleteDeployment(false) - assert.NoError(t, err) + require.NoError(t, err) }) } } @@ -590,7 +591,7 @@ func TestDeleteService(t *testing.T) { appManager := NewAppManager(client, testNamespace, testApp) err := appManager.DeleteService(false) - assert.NoError(t, err) + require.NoError(t, err) }) } } diff --git a/tests/platforms/kubernetes/daprcomponent_test.go b/tests/platforms/kubernetes/daprcomponent_test.go index 260296c4d98..10800670426 100644 --- a/tests/platforms/kubernetes/daprcomponent_test.go +++ b/tests/platforms/kubernetes/daprcomponent_test.go @@ -25,7 +25,7 @@ func TestDaprComponentSpec(t *testing.T) { daprComponent := DaprComponent{component: ComponentDescription{ Name: testName, }} - assert.Equal(t, daprComponent.toComponentSpec().Name, testName) + assert.Equal(t, testName, daprComponent.toComponentSpec().Name) }) t.Run("should set typename when specified", func(t *testing.T) { @@ -33,7 +33,7 @@ func TestDaprComponentSpec(t *testing.T) { daprComponent := DaprComponent{component: ComponentDescription{ TypeName: testTypeName, }} - assert.Equal(t, daprComponent.toComponentSpec().Spec.Type, testTypeName) + assert.Equal(t, testTypeName, daprComponent.toComponentSpec().Spec.Type) }) t.Run("should add metadata when specified", func(t *testing.T) { @@ -45,7 +45,7 @@ func TestDaprComponentSpec(t *testing.T) { }} metadata := daprComponent.toComponentSpec().Spec.Metadata assert.Len(t, metadata, 1) - assert.Equal(t, metadata[0].Name, testKey) + assert.Equal(t, testKey, metadata[0].Name) assert.Equal(t, metadata[0].Value.Raw, []byte(testValue)) }) @@ -61,9 +61,9 @@ func TestDaprComponentSpec(t *testing.T) { }} metadata := daprComponent.toComponentSpec().Spec.Metadata assert.Len(t, metadata, 1) - assert.Equal(t, metadata[0].Name, testSecretKey) - assert.Equal(t, metadata[0].SecretKeyRef.Name, fromSecretName) - assert.Equal(t, metadata[0].SecretKeyRef.Key, fromSecretKey) + assert.Equal(t, testSecretKey, metadata[0].Name) + assert.Equal(t, fromSecretName, metadata[0].SecretKeyRef.Name) + assert.Equal(t, fromSecretKey, metadata[0].SecretKeyRef.Key) }) t.Run("should add component annotations when container image is specified", func(t *testing.T) { @@ -75,6 +75,6 @@ func TestDaprComponentSpec(t *testing.T) { annotations := daprComponent.toComponentSpec().ObjectMeta.Annotations assert.Len(t, annotations, 1) - assert.Equal(t, annotations["dapr.io/component-container"], testContainer) + assert.Equal(t, testContainer, annotations["dapr.io/component-container"]) }) } diff --git a/tests/runner/loadtest/fortio_test.go b/tests/runner/loadtest/fortio_test.go index 26c771750cb..627b9ba2570 100644 --- a/tests/runner/loadtest/fortio_test.go +++ b/tests/runner/loadtest/fortio_test.go @@ -17,12 +17,13 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/dapr/dapr/tests/perf" kube "github.com/dapr/dapr/tests/platforms/kubernetes" "github.com/dapr/dapr/tests/runner" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" ) // 
mockPlatform is the mock of Disposable interface. @@ -45,12 +46,12 @@ func TestFortio(t *testing.T) { t.Run("WithAffinity should set test affinity", func(t *testing.T) { const affinity = "test-affinity" f := NewFortio(WithAffinity(affinity)) - assert.Equal(t, f.affinityLabel, affinity) + assert.Equal(t, affinity, f.affinityLabel) }) t.Run("WithTesterImage should set test image", func(t *testing.T) { const testerImage = "image" f := NewFortio(WithTesterImage(testerImage)) - assert.Equal(t, f.testerImage, testerImage) + assert.Equal(t, testerImage, f.testerImage) }) t.Run("WithParams should set test params", func(t *testing.T) { params := perf.TestParameters{} @@ -60,12 +61,12 @@ func TestFortio(t *testing.T) { t.Run("WithNumHealthChecks set the number of necessary healthchecks to consider app healthy", func(t *testing.T) { const numHealthCheck = 2 f := NewFortio(WithNumHealthChecks(numHealthCheck)) - assert.Equal(t, f.numHealthChecks, numHealthCheck) + assert.Equal(t, numHealthCheck, f.numHealthChecks) }) t.Run("WithTestAppName should set test app name", func(t *testing.T) { const appTestName = "test-app" f := NewFortio(WithTestAppName(appTestName)) - assert.Equal(t, f.testApp, appTestName) + assert.Equal(t, appTestName, f.testApp) }) t.Run("SetParams should set test params and result to nil", func(t *testing.T) { params := perf.TestParameters{} @@ -78,7 +79,7 @@ func TestFortio(t *testing.T) { assert.Equal(t, f.params, paramsOthers) }) t.Run("valiate should return error when apptesterurl is empty", func(t *testing.T) { - assert.Error(t, NewFortio().validate()) + require.Error(t, NewFortio().validate()) }) t.Run("setup should return error if AddApps return an error", func(t *testing.T) { errFake := errors.New("my-err") @@ -94,7 +95,7 @@ func TestFortio(t *testing.T) { mockPlatform.On("AcquireAppExternalURL", appName).Return("") f := NewFortio(WithTestAppName(appName)) setupErr := f.setup(mockPlatform) - assert.Error(t, setupErr) + require.Error(t, setupErr) }) t.Run("Run should return error when validate return an error", func(t *testing.T) { const appName = "app-test" @@ -103,9 +104,9 @@ func TestFortio(t *testing.T) { mockPlatform.On("AcquireAppExternalURL", appName).Return("") f := NewFortio(WithTestAppName(appName)) setupErr := f.Run(mockPlatform) - assert.Error(t, setupErr) + require.Error(t, setupErr) mockPlatform.AssertNumberOfCalls(t, "AcquireAppExternalURL", 1) - assert.Error(t, f.Run(mockPlatform)) + require.Error(t, f.Run(mockPlatform)) mockPlatform.AssertNumberOfCalls(t, "AcquireAppExternalURL", 1) }) } diff --git a/tests/runner/loadtest/k6_client_test.go b/tests/runner/loadtest/k6_client_test.go index c6c4bc1462b..ba540c4d30e 100644 --- a/tests/runner/loadtest/k6_client_test.go +++ b/tests/runner/loadtest/k6_client_test.go @@ -43,7 +43,7 @@ func TestK6Client(t *testing.T) { Client: fake.CreateHTTPClient(func(r *http.Request) (*http.Response, error) { onRequest(r) return &http.Response{ - Body: io.NopCloser(bytes.NewBuffer([]byte(`{}`))), + Body: io.NopCloser(bytes.NewBufferString("{}")), StatusCode: http.StatusOK, }, nil }), @@ -57,39 +57,39 @@ func TestK6Client(t *testing.T) { called := 0 k6.client = getClient(func(r *http.Request) { called++ - assert.Equal(t, r.Method, "DELETE") + assert.Equal(t, "DELETE", r.Method) }) require.NoError(t, k6.Delete(context.Background(), k6Name, metav1.DeleteOptions{})) - assert.Equal(t, called, 1) + assert.Equal(t, 1, called) }) t.Run("Get should call rest GET", func(t *testing.T) { called := 0 k6.client = getClient(func(r *http.Request) { 
called++ - assert.Equal(t, r.Method, "GET") + assert.Equal(t, "GET", r.Method) }) _, err = k6.Get(context.Background(), k6Name) require.NoError(t, err) - assert.Equal(t, called, 1) + assert.Equal(t, 1, called) }) t.Run("Create should call rest POST", func(t *testing.T) { called := 0 k6.client = getClient(func(r *http.Request) { called++ - assert.Equal(t, r.Method, "POST") + assert.Equal(t, "POST", r.Method) }) _, err = k6.Create(context.Background(), &v1.K6{}) require.NoError(t, err) - assert.Equal(t, called, 1) + assert.Equal(t, 1, called) }) t.Run("List should call rest GET with filters", func(t *testing.T) { called := 0 k6.client = getClient(func(r *http.Request) { called++ - assert.Equal(t, r.Method, "GET") + assert.Equal(t, "GET", r.Method) }) _, err = k6.List(context.Background(), metav1.ListOptions{}) require.NoError(t, err) - assert.Equal(t, called, 1) + assert.Equal(t, 1, called) }) } diff --git a/tests/runner/loadtest/k6_test.go b/tests/runner/loadtest/k6_test.go index 279188c2490..4c3730fad72 100644 --- a/tests/runner/loadtest/k6_test.go +++ b/tests/runner/loadtest/k6_test.go @@ -168,16 +168,16 @@ func TestK6(t *testing.T) { WithParallelism(parallelism), WithName(fakeName), ) - assert.Equal(t, tester.script, script) - assert.Equal(t, tester.namespace, fakeNamespace) + assert.Equal(t, script, tester.script) + assert.Equal(t, fakeNamespace, tester.namespace) assert.Len(t, tester.runnerEnv, 1) - assert.Equal(t, tester.runnerImage, fakeImg) - assert.Equal(t, tester.addDapr, false) - assert.Equal(t, tester.parallelism, parallelism) - assert.Equal(t, tester.name, fakeName) + assert.Equal(t, fakeImg, tester.runnerImage) + assert.False(t, tester.addDapr) + assert.Equal(t, parallelism, tester.parallelism) + assert.Equal(t, fakeName, tester.name) }) t.Run("Dispose should return nil when not client was set", func(t *testing.T) { - assert.Nil(t, NewK6("").Dispose()) + require.NoError(t, NewK6("").Dispose()) }) t.Run("Dispose should return nil when delete does not returns an error", func(t *testing.T) { jobs := new(fakeJobClient) @@ -196,7 +196,7 @@ func TestK6(t *testing.T) { k6Client := new(fakeK6Client) k6Client.On("Delete", mock.Anything, k6.name, mock.Anything).Return(nil) k6.k6Client = k6Client - assert.Nil(t, k6.Dispose()) + require.NoError(t, k6.Dispose()) k6Client.AssertNumberOfCalls(t, "Delete", 1) jobs.AssertNumberOfCalls(t, "List", 1) }) @@ -218,7 +218,7 @@ func TestK6(t *testing.T) { k6Client := new(fakeK6Client) k6Client.On("Delete", mock.Anything, k6.name, mock.Anything).Return(apierrors.NewNotFound(schema.GroupResource{}, "k6")) k6.k6Client = k6Client - assert.Nil(t, k6.Dispose()) + require.NoError(t, k6.Dispose()) k6Client.AssertNumberOfCalls(t, "Delete", 1) jobs.AssertNumberOfCalls(t, "List", 1) }) @@ -294,7 +294,7 @@ func TestK6(t *testing.T) { fakeClient := fake.CreateHTTPClient(func(r *http.Request) (*http.Response, error) { called++ return &http.Response{ - Body: io.NopCloser(bytes.NewBuffer([]byte(`{}`))), + Body: io.NopCloser(bytes.NewBufferString("{}")), StatusCode: http.StatusOK, }, nil }) @@ -327,10 +327,10 @@ func TestK6(t *testing.T) { pods.On("List", mock.Anything, mock.Anything).Return(nil) summary, err := K6ResultDefault(k6) - assert.NoError(t, err) + require.NoError(t, err) pods.AssertNumberOfCalls(t, "List", 1) jobs.AssertNumberOfCalls(t, "List", 1) - assert.Equal(t, called, 1) + assert.Equal(t, 1, called) assert.True(t, summary.Pass) }) t.Run("Result should return an error if pod get logs return an error", func(t *testing.T) { @@ -340,7 +340,7 @@ func 
TestK6(t *testing.T) { fakeClient := fake.CreateHTTPClient(func(r *http.Request) (*http.Response, error) { called++ return &http.Response{ - Body: io.NopCloser(bytes.NewBuffer([]byte(`{}`))), + Body: io.NopCloser(bytes.NewBufferString("{}")), StatusCode: http.StatusInternalServerError, }, nil }) @@ -357,15 +357,15 @@ func TestK6(t *testing.T) { pods.On("List", mock.Anything, mock.Anything).Return(nil) _, err := K6ResultDefault(k6) - assert.NotNil(t, err) + require.Error(t, err) pods.AssertNumberOfCalls(t, "List", 1) - assert.Equal(t, called, 1) + assert.Equal(t, 1, called) }) t.Run("k8sRun should return an error if file not exists", func(t *testing.T) { const fileNotExists = "./not_exists.js" k6 := NewK6(fileNotExists) k6.setupOnce.Do(func() {}) // call once to avoid be called - assert.Error(t, k6.k8sRun(&runner.KubeTestPlatform{})) + require.Error(t, k6.k8sRun(&runner.KubeTestPlatform{})) }) t.Run("k8sRun should return an error if createconfig returns an error", func(t *testing.T) { diff --git a/tests/runner/testresource_test.go b/tests/runner/testresource_test.go index 2d57a105421..413d1235226 100644 --- a/tests/runner/testresource_test.go +++ b/tests/runner/testresource_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "golang.org/x/exp/slices" ) @@ -71,7 +72,7 @@ func TestSetup(t *testing.T) { } err := resource.setup() - assert.NoError(t, err) + require.NoError(t, err) found := []string{} for i := 2; i >= 0; i-- { @@ -101,7 +102,7 @@ func TestSetup(t *testing.T) { } err := resource.setup() - assert.Error(t, err) + require.Error(t, err) found := []string{} for i := 2; i >= 0; i-- { @@ -132,11 +133,11 @@ func TestTearDown(t *testing.T) { // setup resources err := resource.setup() - assert.NoError(t, err) + require.NoError(t, err) // tear down all resources err = resource.tearDown() - assert.NoError(t, err) + require.NoError(t, err) r := resource.popActiveResource() assert.Nil(t, r) @@ -159,11 +160,11 @@ func TestTearDown(t *testing.T) { // setup resources err := resource.setup() - assert.NoError(t, err) + require.NoError(t, err) // tear down all resources err = resource.tearDown() - assert.Error(t, err) + require.Error(t, err) r := resource.popActiveResource() assert.Nil(t, r) diff --git a/utils/host_test.go b/utils/host_test.go index 7bd4312de91..0500af5a97e 100644 --- a/utils/host_test.go +++ b/utils/host_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestGetHostAdress(t *testing.T) { @@ -25,13 +26,13 @@ func TestGetHostAdress(t *testing.T) { t.Setenv(HostIPEnvVar, hostIP) address, err := GetHostAddress() - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, hostIP, address) }) t.Run("DAPR_HOST_IP not present, non-empty response", func(t *testing.T) { address, err := GetHostAddress() - assert.Nil(t, err) + require.NoError(t, err) assert.NotEmpty(t, address) }) } diff --git a/utils/resolvconf_test.go b/utils/resolvconf_test.go index b4775999dd5..d95db3999f8 100644 --- a/utils/resolvconf_test.go +++ b/utils/resolvconf_test.go @@ -42,7 +42,7 @@ func TestGetClusterDomain(t *testing.T) { if err != nil { t.Fatalf("get kube cluster domain error:%s", err) } - assert.Equal(t, domain, tc.expected) + assert.Equal(t, tc.expected, domain) } } diff --git a/utils/utils_test.go b/utils/utils_test.go index 0e8530cfd36..b4511f2b6a7 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -53,14 +53,14 @@ func 
TestSetEnvVariables(t *testing.T) { err := SetEnvVariables(map[string]string{ "testKey": "testValue", }) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, "testValue", os.Getenv("testKey")) }) t.Run("set environment variables failed", func(t *testing.T) { err := SetEnvVariables(map[string]string{ "": "testValue", }) - assert.NotNil(t, err) + require.Error(t, err) assert.NotEqual(t, "testValue", os.Getenv("")) }) } @@ -101,7 +101,7 @@ func TestEnvOrElse(t *testing.T) { const elseValue, fakeEnVar = "fakeValue", "envVarThatDoesntExists" require.NoError(t, os.Unsetenv(fakeEnVar)) - assert.Equal(t, GetEnvOrElse(fakeEnVar, elseValue), elseValue) + assert.Equal(t, elseValue, GetEnvOrElse(fakeEnVar, elseValue)) }) t.Run("envOrElse should return env var value when env var is present", func(t *testing.T) { @@ -109,7 +109,7 @@ func TestEnvOrElse(t *testing.T) { defer os.Unsetenv(fakeEnVar) require.NoError(t, os.Setenv(fakeEnVar, fakeEnvVarValue)) - assert.Equal(t, GetEnvOrElse(fakeEnVar, elseValue), fakeEnvVarValue) + assert.Equal(t, fakeEnvVarValue, GetEnvOrElse(fakeEnVar, elseValue)) }) } @@ -153,7 +153,7 @@ func TestPopulateMetadataForBulkPublishEntry(t *testing.T) { "key2": "val2", } resMeta := PopulateMetadataForBulkPublishEntry(reqMeta, entryMeta) - assert.Equal(t, 4, len(resMeta), "expected length to match") + assert.Len(t, resMeta, 4, "expected length to match") assert.Contains(t, resMeta, "key1", "expected key to be present") assert.Equal(t, "val1", resMeta["key1"], "expected val to be equal") assert.Contains(t, resMeta, "key2", "expected key to be present") @@ -169,7 +169,7 @@ func TestPopulateMetadataForBulkPublishEntry(t *testing.T) { "key2": "val2", } resMeta := PopulateMetadataForBulkPublishEntry(reqMeta, entryMeta) - assert.Equal(t, 3, len(resMeta), "expected length to match") + assert.Len(t, resMeta, 3, "expected length to match") assert.Contains(t, resMeta, "key1", "expected key to be present") assert.Equal(t, "val1", resMeta["key1"], "expected val to be equal") assert.Contains(t, resMeta, "key2", "expected key to be present") @@ -185,8 +185,8 @@ func TestFilter(t *testing.T) { out := Filter(in, func(s string) bool { return s != "" }) - assert.Equal(t, 6, len(in)) - assert.Equal(t, 3, len(out)) + assert.Len(t, in, 6) + assert.Len(t, out, 3) assert.Equal(t, []string{"a", "b", "c"}, out) }) t.Run("should filter out empty values and return empty collection if all values are filtered out", func(t *testing.T) { @@ -194,9 +194,8 @@ func TestFilter(t *testing.T) { out := Filter(in, func(s string) bool { return s != "" }) - assert.Equal(t, 3, len(in)) - assert.Equal(t, 0, len(out)) - assert.Equal(t, []string{}, out) + assert.Len(t, in, 3) + assert.Empty(t, out) }) } From 812be3a89b9660c480b8091c1c4ebad9ac4d7c8b Mon Sep 17 00:00:00 2001 From: joshvanl Date: Fri, 1 Dec 2023 11:48:05 +0000 Subject: [PATCH 3/4] Disable tagalign linter, don't align yaml tags Signed-off-by: joshvanl --- .golangci.yml | 1 + pkg/apis/configuration/v1alpha1/types.go | 8 ++--- pkg/apis/resiliency/v1alpha1/types.go | 42 ++++++++++++------------ 3 files changed, 26 insertions(+), 25 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e7fb5af6d36..8700e5a2196 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -330,3 +330,4 @@ linters: - varcheck - deadcode - inamedparam + - tagalign diff --git a/pkg/apis/configuration/v1alpha1/types.go b/pkg/apis/configuration/v1alpha1/types.go index 087bc1fba44..5f3e4680648 100644 --- a/pkg/apis/configuration/v1alpha1/types.go +++ 
b/pkg/apis/configuration/v1alpha1/types.go @@ -198,9 +198,9 @@ type TracingSpec struct { // OtelSpec defines Otel exporter configurations. type OtelSpec struct { - Protocol string `json:"protocol" yaml:"protocol"` + Protocol string `json:"protocol" yaml:"protocol"` EndpointAddress string `json:"endpointAddress" yaml:"endpointAddress"` - IsSecure *bool `json:"isSecure" yaml:"isSecure"` + IsSecure *bool `json:"isSecure" yaml:"isSecure"` } // ZipkinSpec defines Zipkin trace configurations. @@ -242,7 +242,7 @@ type AppPolicySpec struct { // AppOperationAction defines the data structure for each app operation. type AppOperationAction struct { - Operation string `json:"name" yaml:"name"` + Operation string `json:"name" yaml:"name"` Action string `json:"action" yaml:"action"` // +optional HTTPVerb []string `json:"httpVerb,omitempty" yaml:"httpVerb,omitempty"` @@ -260,7 +260,7 @@ type AccessControlSpec struct { // FeatureSpec defines the features that are enabled/disabled. type FeatureSpec struct { - Name string `json:"name" yaml:"name"` + Name string `json:"name" yaml:"name"` Enabled *bool `json:"enabled" yaml:"enabled"` } diff --git a/pkg/apis/resiliency/v1alpha1/types.go b/pkg/apis/resiliency/v1alpha1/types.go index 3431d95bf56..9115417e1c4 100644 --- a/pkg/apis/resiliency/v1alpha1/types.go +++ b/pkg/apis/resiliency/v1alpha1/types.go @@ -41,58 +41,58 @@ func (r Resiliency) String() string { type ResiliencySpec struct { Policies Policies `json:"policies"` - Targets Targets `json:"targets" yaml:"targets"` + Targets Targets `json:"targets" yaml:"targets"` } type Policies struct { - Timeouts map[string]string `json:"timeouts,omitempty" yaml:"timeouts,omitempty"` - Retries map[string]Retry `json:"retries,omitempty" yaml:"retries,omitempty"` + Timeouts map[string]string `json:"timeouts,omitempty" yaml:"timeouts,omitempty"` + Retries map[string]Retry `json:"retries,omitempty" yaml:"retries,omitempty"` CircuitBreakers map[string]CircuitBreaker `json:"circuitBreakers,omitempty" yaml:"circuitBreakers,omitempty"` } type Retry struct { - Policy string `json:"policy,omitempty" yaml:"policy,omitempty"` - Duration string `json:"duration,omitempty" yaml:"duration,omitempty"` + Policy string `json:"policy,omitempty" yaml:"policy,omitempty"` + Duration string `json:"duration,omitempty" yaml:"duration,omitempty"` MaxInterval string `json:"maxInterval,omitempty" yaml:"maxInterval,omitempty"` - MaxRetries *int `json:"maxRetries,omitempty" yaml:"maxRetries,omitempty"` + MaxRetries *int `json:"maxRetries,omitempty" yaml:"maxRetries,omitempty"` } type CircuitBreaker struct { MaxRequests int `json:"maxRequests,omitempty" yaml:"maxRequests,omitempty"` - Interval string `json:"interval,omitempty" yaml:"interval,omitempty"` - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Trip string `json:"trip,omitempty" yaml:"trip,omitempty"` + Interval string `json:"interval,omitempty" yaml:"interval,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Trip string `json:"trip,omitempty" yaml:"trip,omitempty"` } type Targets struct { - Apps map[string]EndpointPolicyNames `json:"apps,omitempty" yaml:"apps,omitempty"` - Actors map[string]ActorPolicyNames `json:"actors,omitempty" yaml:"actors,omitempty"` + Apps map[string]EndpointPolicyNames `json:"apps,omitempty" yaml:"apps,omitempty"` + Actors map[string]ActorPolicyNames `json:"actors,omitempty" yaml:"actors,omitempty"` Components map[string]ComponentPolicyNames `json:"components,omitempty" yaml:"components,omitempty"` } type 
ComponentPolicyNames struct { - Inbound PolicyNames `json:"inbound,omitempty" yaml:"inbound,omitempty"` + Inbound PolicyNames `json:"inbound,omitempty" yaml:"inbound,omitempty"` Outbound PolicyNames `json:"outbound,omitempty" yaml:"outbound,omitempty"` } type PolicyNames struct { - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` } type EndpointPolicyNames struct { - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` - CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` + CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` CircuitBreakerCacheSize int `json:"circuitBreakerCacheSize,omitempty" yaml:"circuitBreakerCacheSize,omitempty"` } type ActorPolicyNames struct { - Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` - Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` - CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` - CircuitBreakerScope string `json:"circuitBreakerScope,omitempty" yaml:"circuitBreakerScope,omitempty"` + Timeout string `json:"timeout,omitempty" yaml:"timeout,omitempty"` + Retry string `json:"retry,omitempty" yaml:"retry,omitempty"` + CircuitBreaker string `json:"circuitBreaker,omitempty" yaml:"circuitBreaker,omitempty"` + CircuitBreakerScope string `json:"circuitBreakerScope,omitempty" yaml:"circuitBreakerScope,omitempty"` CircuitBreakerCacheSize int `json:"circuitBreakerCacheSize,omitempty" yaml:"circuitBreakerCacheSize,omitempty"` } From b63e7e929234d0110fff03d52a39b91840c68ff9 Mon Sep 17 00:00:00 2001 From: joshvanl Date: Fri, 1 Dec 2023 11:58:55 +0000 Subject: [PATCH 4/4] Remove gRPC Get nil checks Signed-off-by: joshvanl --- pkg/grpc/api_daprinternal.go | 4 +-- pkg/grpc/api_test.go | 6 +--- pkg/messaging/direct_messaging.go | 4 +-- pkg/messaging/v1/util.go | 2 +- pkg/runtime/processor/binding/send.go | 4 +-- .../processor/pubsub/bulk_subscriber.go | 29 ++++++++-------- pkg/runtime/pubsub/subscriptions.go | 2 +- pkg/sentry/server/server.go | 33 ++++++++++--------- pkg/testing/grpc/server.go | 3 +- .../pubsub-subscriber-routing_grpc/app.go | 2 +- tests/apps/pubsub-subscriber_grpc/app.go | 2 +- 11 files changed, 44 insertions(+), 47 deletions(-) diff --git a/pkg/grpc/api_daprinternal.go b/pkg/grpc/api_daprinternal.go index dbbb87bee41..3599fe90008 100644 --- a/pkg/grpc/api_daprinternal.go +++ b/pkg/grpc/api_daprinternal.go @@ -90,7 +90,7 @@ func (a *api) CallLocalStream(stream internalv1pb.ServiceInvocation_CallLocalStr if err != nil { return err } - if chunk.GetRequest() == nil || chunk.GetRequest().GetMetadata() == nil || chunk.GetRequest().GetMessage() == nil { + if chunk.GetRequest().GetMetadata() == nil || chunk.GetRequest().GetMessage() == nil { return status.Errorf(codes.InvalidArgument, messages.ErrInternalInvokeRequest, "request does not contain the required fields in the leading chunk") } @@ -186,7 +186,7 @@ func (a *api) CallLocalStream(stream internalv1pb.ServiceInvocation_CallLocalStr return } - if 
chunk.GetRequest() != nil && (chunk.GetRequest().GetMetadata() != nil || chunk.GetRequest().GetMessage() != nil) { + if chunk.GetRequest().GetMetadata() != nil || chunk.GetRequest().GetMessage() != nil { pw.CloseWithError(errors.New("request metadata found in non-leading chunk")) return } diff --git a/pkg/grpc/api_test.go b/pkg/grpc/api_test.go index 91e6d7bf1a6..e0241eab9dd 100644 --- a/pkg/grpc/api_test.go +++ b/pkg/grpc/api_test.go @@ -166,19 +166,15 @@ func (m *mockGRPCAPI) CallLocalStream(stream internalv1pb.ServiceInvocation_Call WithContentType("text/plain") defer resp.Close() - var data []byte pd, err := resp.ProtoWithData() if err != nil { return err } - if pd.GetMessage() != nil && pd.GetMessage().GetData() != nil { - data = pd.GetMessage().GetData().GetValue() - } stream.Send(&internalv1pb.InternalInvokeResponseStream{ Response: resp.Proto(), Payload: &commonv1pb.StreamPayload{ - Data: data, + Data: pd.GetMessage().GetData().GetValue(), Seq: 0, }, }) diff --git a/pkg/messaging/direct_messaging.go b/pkg/messaging/direct_messaging.go index b1d7bb5dcba..66adf9053b7 100644 --- a/pkg/messaging/direct_messaging.go +++ b/pkg/messaging/direct_messaging.go @@ -436,7 +436,7 @@ func (d *directMessaging) invokeRemoteStream(ctx context.Context, clientV1 inter } return nil, err } - if chunk.GetResponse() == nil || chunk.GetResponse().GetStatus() == nil { + if chunk.GetResponse().GetStatus() == nil { return nil, errors.New("response does not contain the required fields in the leading chunk") } pr, pw := io.Pipe() @@ -486,7 +486,7 @@ func (d *directMessaging) invokeRemoteStream(ctx context.Context, clientV1 inter return } - if chunk.GetResponse() != nil && (chunk.GetResponse().GetStatus() != nil || chunk.GetResponse().GetHeaders() != nil || chunk.GetResponse().GetMessage() != nil) { + if chunk.GetResponse().GetStatus() != nil || chunk.GetResponse().GetHeaders() != nil || chunk.GetResponse().GetMessage() != nil { pw.CloseWithError(errors.New("response metadata found in non-leading chunk")) return } diff --git a/pkg/messaging/v1/util.go b/pkg/messaging/v1/util.go index 54efddd1280..f089e1ea3f3 100644 --- a/pkg/messaging/v1/util.go +++ b/pkg/messaging/v1/util.go @@ -125,7 +125,7 @@ func httpHeadersToInternalMetadata(header http.Header) DaprInternalMetadata { internalMD := make(DaprInternalMetadata, len(header)) for key, val := range header { // Note: HTTP headers can never be binary (only gRPC supports binary headers) - if internalMD[key] == nil || len(internalMD[key].GetValues()) == 0 { + if len(internalMD[key].GetValues()) == 0 { internalMD[key] = &internalv1pb.ListStringValue{ Values: val, } diff --git a/pkg/runtime/processor/binding/send.go b/pkg/runtime/processor/binding/send.go index 30b6ea24cee..95f274a88b5 100644 --- a/pkg/runtime/processor/binding/send.go +++ b/pkg/runtime/processor/binding/send.go @@ -366,8 +366,8 @@ func (b *binding) sendBindingEventToApp(ctx context.Context, bindingName string, appResponseBody, err = resp.RawDataFull() // ::TODO report metrics for http, such as grpc - if resp.Status().GetCode() < 200 || resp.Status().GetCode() > 299 { - return nil, fmt.Errorf("fails to send binding event to http app channel, status code: %d body: %s", resp.Status().GetCode(), string(appResponseBody)) + if code := resp.Status().GetCode(); code < 200 || code > 299 { + return nil, fmt.Errorf("fails to send binding event to http app channel, status code: %d body: %s", code, string(appResponseBody)) } if err != nil { diff --git a/pkg/runtime/processor/pubsub/bulk_subscriber.go 
b/pkg/runtime/processor/pubsub/bulk_subscriber.go index 503ceb93d6f..045b2b1bb3d 100644 --- a/pkg/runtime/processor/pubsub/bulk_subscriber.go +++ b/pkg/runtime/processor/pubsub/bulk_subscriber.go @@ -600,27 +600,28 @@ func (p *pubsub) publishBulkMessageGRPC(ctx context.Context, bulkSubCallData *bu hasAnyError := false for _, response := range res.GetStatuses() { - if _, ok := (*bscData.entryIdIndexMap)[response.GetEntryId()]; ok { + entryID := response.GetEntryId() + if _, ok := (*bscData.entryIdIndexMap)[entryID]; ok { switch response.GetStatus() { case runtimev1pb.TopicEventResponse_SUCCESS: //nolint:nosnakecase // on uninitialized status, this is the case it defaults to as an uninitialized status defaults to 0 which is // success from protobuf definition bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Success)] += 1 - entryRespReceived[response.GetEntryId()] = true - addBulkResponseEntry(bulkResponses, response.GetEntryId(), nil) + entryRespReceived[entryID] = true + addBulkResponseEntry(bulkResponses, entryID, nil) case runtimev1pb.TopicEventResponse_RETRY: //nolint:nosnakecase bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += 1 - entryRespReceived[response.GetEntryId()] = true - addBulkResponseEntry(bulkResponses, response.GetEntryId(), - fmt.Errorf("RETRY status returned from app while processing pub/sub event for entry id: %v", response.GetEntryId())) + entryRespReceived[entryID] = true + addBulkResponseEntry(bulkResponses, entryID, + fmt.Errorf("RETRY status returned from app while processing pub/sub event for entry id: %v", entryID)) hasAnyError = true case runtimev1pb.TopicEventResponse_DROP: //nolint:nosnakecase - log.Warnf("DROP status returned from app while processing pub/sub event for entry id: %v", response.GetEntryId()) + log.Warnf("DROP status returned from app while processing pub/sub event for entry id: %v", entryID) bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Drop)] += 1 - entryRespReceived[response.GetEntryId()] = true - addBulkResponseEntry(bulkResponses, response.GetEntryId(), nil) + entryRespReceived[entryID] = true + addBulkResponseEntry(bulkResponses, entryID, nil) if deadLetterTopic != "" { - msg := psm.pubSubMessages[(*bscData.entryIdIndexMap)[response.GetEntryId()]] + msg := psm.pubSubMessages[(*bscData.entryIdIndexMap)[entryID]] _ = p.sendToDeadLetter(ctx, bscData.psName, &contribpubsub.NewMessage{ Data: msg.entry.Event, Topic: bscData.topic, @@ -631,13 +632,13 @@ func (p *pubsub) publishBulkMessageGRPC(ctx context.Context, bulkSubCallData *bu default: // Consider unknown status field as error and retry bscData.bulkSubDiag.statusWiseDiag[string(contribpubsub.Retry)] += 1 - entryRespReceived[response.GetEntryId()] = true - addBulkResponseEntry(bulkResponses, response.GetEntryId(), - fmt.Errorf("unknown status returned from app while processing pub/sub event for entry id %v: %v", response.GetEntryId(), response.GetStatus())) + entryRespReceived[entryID] = true + addBulkResponseEntry(bulkResponses, entryID, + fmt.Errorf("unknown status returned from app while processing pub/sub event for entry id %v: %v", entryID, response.GetStatus())) hasAnyError = true } } else { - log.Warnf("Invalid entry id received from app while processing pub/sub event %v", response.GetEntryId()) + log.Warnf("Invalid entry id received from app while processing pub/sub event %v", entryID) continue } } diff --git a/pkg/runtime/pubsub/subscriptions.go b/pkg/runtime/pubsub/subscriptions.go index 9f3e936d698..6db1b7d4415 100644 --- 
a/pkg/runtime/pubsub/subscriptions.go +++ b/pkg/runtime/pubsub/subscriptions.go @@ -198,7 +198,7 @@ func GetSubscriptionsGRPC(ctx context.Context, channel runtimev1pb.AppCallbackCl } var subscriptions []Subscription - if resp == nil || len(resp.GetSubscriptions()) == 0 { + if len(resp.GetSubscriptions()) == 0 { log.Debug(noSubscriptionsError) } else { subscriptions = make([]Subscription, len(resp.GetSubscriptions())) diff --git a/pkg/sentry/server/server.go b/pkg/sentry/server/server.go index 76f1aa8fda5..5b59003d3cf 100644 --- a/pkg/sentry/server/server.go +++ b/pkg/sentry/server/server.go @@ -111,42 +111,43 @@ func (s *server) signCertificate(ctx context.Context, req *sentryv1pb.SignCertif if req.GetTokenValidator() != sentryv1pb.SignCertificateRequest_UNKNOWN && req.GetTokenValidator().String() != "" { validator = req.GetTokenValidator() } + namespace := req.GetNamespace() if validator == sentryv1pb.SignCertificateRequest_UNKNOWN { - log.Debugf("Validator '%s' is not known for %s/%s", validator.String(), req.GetNamespace(), req.GetId()) + log.Debugf("Validator '%s' is not known for %s/%s", validator.String(), namespace, req.GetId()) return nil, status.Error(codes.InvalidArgument, "a validator name must be specified in this environment") } if _, ok := s.vals[validator]; !ok { - log.Debugf("Validator '%s' is not enabled for %s/%s", validator.String(), req.GetNamespace(), req.GetId()) + log.Debugf("Validator '%s' is not enabled for %s/%s", validator.String(), namespace, req.GetId()) return nil, status.Error(codes.InvalidArgument, "the requested validator is not enabled") } - log.Debugf("Processing SignCertificate request for %s/%s (validator: %s)", req.GetNamespace(), req.GetId(), validator.String()) + log.Debugf("Processing SignCertificate request for %s/%s (validator: %s)", namespace, req.GetId(), validator.String()) trustDomain, overrideDuration, err := s.vals[validator].Validate(ctx, req) if err != nil { - log.Debugf("Failed to validate request for %s/%s: %s", req.GetNamespace(), req.GetId(), err) + log.Debugf("Failed to validate request for %s/%s: %s", namespace, req.GetId(), err) return nil, status.Error(codes.PermissionDenied, err.Error()) } der, _ := pem.Decode(req.GetCertificateSigningRequest()) if der == nil { - log.Debugf("Invalid CSR: PEM block is nil for %s/%s", req.GetNamespace(), req.GetId()) + log.Debugf("Invalid CSR: PEM block is nil for %s/%s", namespace, req.GetId()) return nil, status.Error(codes.InvalidArgument, "invalid certificate signing request") } // TODO: @joshvanl: Before v1.12, daprd was sending CSRs with the PEM block type "CERTIFICATE" // After 1.14, allow only "CERTIFICATE REQUEST" if der.Type != "CERTIFICATE REQUEST" && der.Type != "CERTIFICATE" { - log.Debugf("Invalid CSR: PEM block type is invalid for %s/%s: %s", req.GetNamespace(), req.GetId(), der.Type) + log.Debugf("Invalid CSR: PEM block type is invalid for %s/%s: %s", namespace, req.GetId(), der.Type) return nil, status.Error(codes.InvalidArgument, "invalid certificate signing request") } csr, err := x509.ParseCertificateRequest(der.Bytes) if err != nil { - log.Debugf("Failed to parse CSR for %s/%s: %v", req.GetNamespace(), req.GetId(), err) + log.Debugf("Failed to parse CSR for %s/%s: %v", namespace, req.GetId(), err) return nil, status.Errorf(codes.InvalidArgument, "failed to parse certificate signing request: %v", err) } if csr.CheckSignature() != nil { - log.Debugf("Invalid CSR: invalid signature for %s/%s", req.GetNamespace(), req.GetId()) + log.Debugf("Invalid CSR: invalid signature for %s/%s", 
namespace, req.GetId()) return nil, status.Error(codes.InvalidArgument, "invalid signature") } @@ -157,23 +158,23 @@ func (s *server) signCertificate(ctx context.Context, req *sentryv1pb.SignCertif // compatibility. Remove after v1.14. var dns []string switch { - case req.GetNamespace() == security.CurrentNamespace() && req.GetId() == "dapr-injector": - dns = []string{fmt.Sprintf("dapr-sidecar-injector.%s.svc", req.GetNamespace())} - case req.GetNamespace() == security.CurrentNamespace() && req.GetId() == "dapr-operator": + case namespace == security.CurrentNamespace() && req.GetId() == "dapr-injector": + dns = []string{fmt.Sprintf("dapr-sidecar-injector.%s.svc", namespace)} + case namespace == security.CurrentNamespace() && req.GetId() == "dapr-operator": // TODO: @joshvanl: before v1.12, daprd was matching on the operator server // having `cluster.local` as a DNS SAN name. Remove after v1.13. - dns = []string{"cluster.local", fmt.Sprintf("dapr-webhook.%s.svc", req.GetNamespace())} - case req.GetNamespace() == security.CurrentNamespace() && req.GetId() == "dapr-placement": + dns = []string{"cluster.local", fmt.Sprintf("dapr-webhook.%s.svc", namespace)} + case namespace == security.CurrentNamespace() && req.GetId() == "dapr-placement": dns = []string{"cluster.local"} default: - dns = []string{fmt.Sprintf("%s.%s.svc.cluster.local", req.GetId(), req.GetNamespace())} + dns = []string{fmt.Sprintf("%s.%s.svc.cluster.local", req.GetId(), namespace)} } chain, err := s.ca.SignIdentity(ctx, &ca.SignRequest{ PublicKey: csr.PublicKey, SignatureAlgorithm: csr.SignatureAlgorithm, TrustDomain: trustDomain.String(), - Namespace: req.GetNamespace(), + Namespace: namespace, AppID: req.GetId(), DNS: dns, }, overrideDuration) @@ -188,7 +189,7 @@ func (s *server) signCertificate(ctx context.Context, req *sentryv1pb.SignCertif return nil, status.Error(codes.Internal, "failed to encode certificate chain") } - log.Debugf("Successfully signed certificate for %s/%s", req.GetNamespace(), req.GetId()) + log.Debugf("Successfully signed certificate for %s/%s", namespace, req.GetId()) return &sentryv1pb.SignCertificateResponse{ WorkloadCertificate: chainPEM, diff --git a/pkg/testing/grpc/server.go b/pkg/testing/grpc/server.go index 30e7bde9f09..0a1a9125f71 100644 --- a/pkg/testing/grpc/server.go +++ b/pkg/testing/grpc/server.go @@ -20,12 +20,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/test/bufconn" - "github.com/stretchr/testify/require" - runtimev1pb "github.com/dapr/dapr/pkg/proto/runtime/v1" "github.com/dapr/kit/logger" ) diff --git a/tests/apps/pubsub-subscriber-routing_grpc/app.go b/tests/apps/pubsub-subscriber-routing_grpc/app.go index 706eb814bd6..1508af480f5 100644 --- a/tests/apps/pubsub-subscriber-routing_grpc/app.go +++ b/tests/apps/pubsub-subscriber-routing_grpc/app.go @@ -121,7 +121,7 @@ func initializeSets() { // The payload carries a Method to identify the method, a set of metadata properties and an optional payload. 
func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { reqID := "s-" + uuid.New().String() - if in.GetHttpExtension() != nil && in.GetHttpExtension().GetQuerystring() != "" { + if len(in.GetHttpExtension().GetQuerystring()) > 0 { qs, err := url.ParseQuery(in.GetHttpExtension().GetQuerystring()) if err == nil && qs.Has("reqid") { reqID = qs.Get("reqid") diff --git a/tests/apps/pubsub-subscriber_grpc/app.go b/tests/apps/pubsub-subscriber_grpc/app.go index bf66100f53b..40b2f130d14 100644 --- a/tests/apps/pubsub-subscriber_grpc/app.go +++ b/tests/apps/pubsub-subscriber_grpc/app.go @@ -154,7 +154,7 @@ func initializeSets() { // The payload carries a Method to identify the method, a set of metadata properties and an optional payload. func (s *server) OnInvoke(ctx context.Context, in *commonv1pb.InvokeRequest) (*commonv1pb.InvokeResponse, error) { reqID := "s-" + uuid.New().String() - if in.GetHttpExtension() != nil && in.GetHttpExtension().GetQuerystring() != "" { + if len(in.GetHttpExtension().GetQuerystring()) > 0 { qs, err := url.ParseQuery(in.GetHttpExtension().GetQuerystring()) if err == nil && qs.Has("reqid") { reqID = qs.Get("reqid")
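Why the nil checks removed in PATCH 4/4 are safe to drop: getters generated by protoc-gen-go are nil-safe, i.e. each GetX method checks its receiver for nil and returns the field's zero value, so a chain such as chunk.GetRequest().GetMetadata() cannot panic even when the intermediate message is absent. The sketch below uses hypothetical Request/Metadata types, not Dapr's generated code, to illustrate the pattern this patch relies on; the same property is what lets len(in.GetHttpExtension().GetQuerystring()) > 0 replace the explicit GetHttpExtension() != nil guard in the test apps above.

package main

import "fmt"

// Metadata and Request stand in for protobuf-generated message types; the
// getters below mirror the nil-safe pattern emitted by protoc-gen-go.
type Metadata struct{ Values []string }

type Request struct{ Metadata *Metadata }

// GetMetadata returns nil when the receiver itself is nil, as a generated
// getter would.
func (r *Request) GetMetadata() *Metadata {
	if r != nil {
		return r.Metadata
	}
	return nil
}

// GetValues tolerates a nil receiver one level further down the chain.
func (m *Metadata) GetValues() []string {
	if m != nil {
		return m.Values
	}
	return nil
}

func main() {
	var req *Request // e.g. a stream chunk that carries no request
	// No panic: each getter absorbs the nil receiver, so the whole chain
	// collapses to its zero value and a single comparison is sufficient.
	fmt.Println(len(req.GetMetadata().GetValues()) == 0) // prints: true
}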