diff --git a/.chloggen/add-multiple-endpoints-support-httpcheckreceiver.yaml b/.chloggen/add-multiple-endpoints-support-httpcheckreceiver.yaml new file mode 100644 index 000000000000..49aeb87acef9 --- /dev/null +++ b/.chloggen/add-multiple-endpoints-support-httpcheckreceiver.yaml @@ -0,0 +1,13 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: httpcheckreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Added support for specifying multiple endpoints in the `httpcheckreceiver` using the `endpoints` field. Users can now monitor multiple URLs with a single configuration block, improving flexibility and reducing redundancy." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [37121] diff --git a/.chloggen/anvoy-als-receiver.yaml b/.chloggen/anvoy-als-receiver.yaml new file mode 100644 index 000000000000..5b1686c06c48 --- /dev/null +++ b/.chloggen/anvoy-als-receiver.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: envoyalsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add a new receiver for the Envoy ALS (Access Log Service). + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
+issues: [36464] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/cumulative-to-delta-processor-metric-type-filter.yaml b/.chloggen/cumulative-to-delta-processor-metric-type-filter.yaml new file mode 100644 index 000000000000..79f1c9d82175 --- /dev/null +++ b/.chloggen/cumulative-to-delta-processor-metric-type-filter.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: cumulativetodeltaprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add metric type filter for cumulativetodelta processor + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [33673] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/elasticsearchexporter_logs_dynamic_id.yaml b/.chloggen/elasticsearchexporter_logs_dynamic_id.yaml new file mode 100644 index 000000000000..84867eac2a07 --- /dev/null +++ b/.chloggen/elasticsearchexporter_logs_dynamic_id.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add config `logs_dynamic_id` to dynamically set the document ID of log records using log record attribute `elasticsearch.document_id` + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36882] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. 
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/expo-histogram-fix-downscaling.yaml b/.chloggen/expo-histogram-fix-downscaling.yaml new file mode 100644 index 000000000000..0e4e89e0ea36 --- /dev/null +++ b/.chloggen/expo-histogram-fix-downscaling.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: deltatocumulativeprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: In order to cap number of histogram buckets take the min of desired scale across negative and positive buckets instead of the max + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [37416] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/.chloggen/mowies-deprecate-githubgen.yaml b/.chloggen/mowies-deprecate-githubgen.yaml new file mode 100644 index 000000000000..8a31d91dbf6c --- /dev/null +++ b/.chloggen/mowies-deprecate-githubgen.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: githubgen + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Switch over all usages of githubgen to the new tool location, since the old tool was deprecated + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [37412, 37294] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/pubsubreceiver-encodingextensions.yaml b/.chloggen/pubsubreceiver-encodingextensions.yaml new file mode 100644 index 000000000000..efbeca18fe5f --- /dev/null +++ b/.chloggen/pubsubreceiver-encodingextensions.yaml @@ -0,0 +1,11 @@ +change_type: enhancement + +component: googlecloudpubsubreceiver + +note: Added support for encoding extensions. 
+ +issues: [37109] + +subtext: + +change_logs: [user] diff --git a/.github/ALLOWLIST b/.github/ALLOWLIST index eafc6d753c45..5e8e173f2d5e 100644 --- a/.github/ALLOWLIST +++ b/.github/ALLOWLIST @@ -1,15 +1,11 @@ # Code generated by githubgen. DO NOT EDIT. ##################################################### # -# List of components in OpenTelemetry Collector Contrib +# List of components # waiting on owners to be assigned # ##################################################### # -# Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md -# -# # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ca402aeb52ce..14c0208a3eba 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,14 +1,10 @@ # Code generated by githubgen. DO NOT EDIT. ##################################################### # -# List of codeowners for OpenTelemetry Collector Contrib +# List of codeowners # ##################################################### # -# Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md -# -# # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # @@ -222,6 +218,7 @@ receiver/couchdbreceiver/ @open-telemetry receiver/datadogreceiver/ @open-telemetry/collector-contrib-approvers @boostchicken @gouthamve @MovieStoreGuy receiver/dockerstatsreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis receiver/elasticsearchreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/envoyalsreceiver/ @open-telemetry/collector-contrib-approvers @evan-bradley receiver/expvarreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy receiver/filelogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski 
receiver/filestatsreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @@ -316,16 +313,18 @@ testbed/mockdatasenders/mockdatadogagentexporter/ @open-telemetry ##################################################### # -# List of distribution maintainers for OpenTelemetry Collector Contrib +# List of distribution maintainers # ##################################################### reports/distributions/core.yaml @open-telemetry/collector-contrib-approvers reports/distributions/contrib.yaml @open-telemetry/collector-contrib-approvers reports/distributions/k8s.yaml @open-telemetry/collector-contrib-approvers - +##################################################### +# ## UNMAINTAINED components - +# +##################################################### exporter/kineticaexporter/ @open-telemetry/collector-contrib-approvers exporter/opensearchexporter/ @open-telemetry/collector-contrib-approvers extension/observer/ecstaskobserver/ @open-telemetry/collector-contrib-approvers diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 580e28f0cd1b..95b022b70897 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -220,6 +220,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index 64ba734ab373..acf39e99429e 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -214,6 +214,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index a7ab6232949e..a99efb30d46b 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml +++ 
b/.github/ISSUE_TEMPLATE/other.yaml @@ -214,6 +214,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml b/.github/ISSUE_TEMPLATE/unmaintained.yaml index 120bf2c250d6..c7148dc20c9a 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -219,6 +219,7 @@ body: - receiver/datadog - receiver/dockerstats - receiver/elasticsearch + - receiver/envoyals - receiver/expvar - receiver/filelog - receiver/filestats diff --git a/.github/workflows/check-codeowners.yaml b/.github/workflows/check-codeowners.yaml index ba4e638e31b9..d700b1dc5b06 100644 --- a/.github/workflows/check-codeowners.yaml +++ b/.github/workflows/check-codeowners.yaml @@ -21,42 +21,40 @@ concurrency: cancel-in-progress: true jobs: - setup-environment: + check-codeowners: timeout-minutes: 30 runs-on: ubuntu-24.04 if: ${{ github.actor != 'dependabot[bot]' && github.repository == 'open-telemetry/opentelemetry-collector-contrib' }} steps: - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: "1.22.8" cache: false - - name: Cache Go - id: go-cache + + - name: Cache Go Tools + id: go-tools-cache timeout-minutes: 5 uses: actions/cache@v4 with: path: | - ~/go/bin - ~/go/pkg/mod ./.tools - key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - check-codeowners: - runs-on: ubuntu-24.04 - needs: [setup-environment] - steps: - - uses: actions/checkout@v4 - - name: Gen githubgen tool + key: go-tools-${{ runner.os }}-${{ hashFiles('internal/tools/go.sum') }} + + - name: Install tools + if: github.repository == 'open-telemetry/opentelemetry-collector-contrib' && steps.go-tools-cache.outputs.cache-hit != 'true' run: | - make githubgen-install - echo "$(go env GOPATH)/bin" >> $GITHUB_PATH + make install-tools + - uses: actions/checkout@v4 with: ref: ${{github.event.pull_request.head.ref}} 
repository: ${{github.event.pull_request.head.repo.full_name}} path: pr + - name: Gen CODEOWNERS run: | cd pr - GITHUB_TOKEN=${{ secrets.READ_ORG_AND_USER_TOKEN }} githubgen + GITHUB_TOKEN=${{ secrets.READ_ORG_AND_USER_TOKEN }} ../.tools/githubgen codeowners git diff -s --exit-code || (echo 'Generated code is out of date, please apply this diff and commit the changes in this PR.' && git diff && exit 1) diff --git a/.golangci.yml b/.golangci.yml index c50cadc005fd..ccf76478d4dc 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -166,6 +166,9 @@ linters-settings: explicit-exhaustive-switch: true ignore-enum-members: "pmetric.MetricTypeEmpty" + nolintlint: + require-specific: true + predeclared: ignore: copy @@ -201,6 +204,7 @@ linters: - gosec - govet - misspell + - nolintlint - predeclared - reassign - revive @@ -227,3 +231,7 @@ issues: - text: "G115:" linters: - gosec + - path: "pagefile.go" # This exclusion is required for Windows only + text: "cachedBytes" + linters: + - unused diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7b2ca3c127d2..fce6b7f09c0d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -215,6 +215,7 @@ When submitting a component to the community, consider breaking it down into sep * `make generate` * `make multimod-verify` * `make generate-gh-issue-templates` + * `make gengithub` * `make addlicense` * **Second PR** should include the concrete implementation of the component. If the size of this PR is larger than the recommended size consider splitting it in diff --git a/Makefile b/Makefile index 341c909dbfc3..fdbe5d322176 100644 --- a/Makefile +++ b/Makefile @@ -310,20 +310,16 @@ generate: install-tools PATH="$$PWD/.tools:$$PATH" $(MAKE) for-all CMD="$(GOCMD) generate ./..." $(MAKE) gofmt -.PHONY: githubgen-install -githubgen-install: - cd cmd/githubgen && $(GOCMD) install . 
- .PHONY: gengithub -gengithub: githubgen-install - githubgen +gengithub: $(GITHUBGEN) + $(GITHUBGEN) .PHONY: gendistributions -gendistributions: githubgen-install - githubgen distributions +gendistributions: $(GITHUBGEN) + $(GITHUBGEN) distributions .PHONY: update-codeowners -update-codeowners: gengithub generate +update-codeowners: generate gengithub FILENAME?=$(shell git branch --show-current) .PHONY: chlog-new @@ -344,7 +340,7 @@ chlog-update: $(CHLOGGEN) .PHONY: genotelcontribcol genotelcontribcol: $(BUILDER) - $(BUILDER) --skip-compilation --config cmd/otelcontribcol/builder-config.yaml --output-path cmd/otelcontribcol + $(BUILDER) --skip-compilation --config cmd/otelcontribcol/builder-config.yaml # Build the Collector executable. .PHONY: otelcontribcol @@ -360,7 +356,7 @@ otelcontribcollite: genotelcontribcol .PHONY: genoteltestbedcol genoteltestbedcol: $(BUILDER) - $(BUILDER) --skip-compilation --config cmd/oteltestbedcol/builder-config.yaml --output-path cmd/oteltestbedcol + $(BUILDER) --skip-compilation --config cmd/oteltestbedcol/builder-config.yaml # Build the Collector executable, with only components used in testbed. .PHONY: oteltestbedcol @@ -423,6 +419,8 @@ update-otel:$(MULTIMOD) $(MAKE) genoteltestbedcol $(MAKE) generate $(MAKE) crosslink + # Tidy again after generating code + $(MAKE) gotidy $(MAKE) remove-toolchain git add . && git commit -s -m "[chore] mod and toolchain tidy" ; \ @@ -557,8 +555,7 @@ clean: .PHONY: generate-gh-issue-templates generate-gh-issue-templates: - cd cmd/githubgen && $(GOCMD) install . 
- githubgen issue-templates + $(GITHUBGEN) issue-templates .PHONY: checks checks: diff --git a/Makefile.Common b/Makefile.Common index e370566008a6..1f8f3161e3ac 100644 --- a/Makefile.Common +++ b/Makefile.Common @@ -68,6 +68,7 @@ MISSPELL_CORRECTION := $(TOOLS_BIN_DIR)/misspell -w LINT := $(TOOLS_BIN_DIR)/golangci-lint MULTIMOD := $(TOOLS_BIN_DIR)/multimod CHLOGGEN := $(TOOLS_BIN_DIR)/chloggen +GITHUBGEN := $(TOOLS_BIN_DIR)/githubgen GOIMPORTS := $(TOOLS_BIN_DIR)/goimports PORTO := $(TOOLS_BIN_DIR)/porto CHECKFILE := $(TOOLS_BIN_DIR)/checkfile diff --git a/cmd/githubgen/allowlist.txt b/cmd/githubgen/allowlist.txt index b5d97e94fa7c..838360612c7b 100644 --- a/cmd/githubgen/allowlist.txt +++ b/cmd/githubgen/allowlist.txt @@ -2,7 +2,6 @@ abhishek-at-cloudwerx adcharre Caleb-Hurshman cemdk -cheempz dlopes7 driverpt dsimil diff --git a/cmd/githubgen/metadata.yaml b/cmd/githubgen/metadata.yaml index a0d01e83f024..51a898cd7e2d 100644 --- a/cmd/githubgen/metadata.yaml +++ b/cmd/githubgen/metadata.yaml @@ -1,6 +1,8 @@ type: githubgen status: + stability: + deprecated: [] class: cmd codeowners: - active: [atoulme] \ No newline at end of file + active: [atoulme] diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index 40551f955ffb..e7875b6b787d 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -11,6 +11,7 @@ dist: name: otelcontribcol description: Local OpenTelemetry Collector Contrib binary, testing only. 
version: 0.118.0-dev + output_path: ./cmd/otelcontribcol extensions: - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.1-0.20250121185328-fbefb22cc2b3 @@ -355,6 +356,7 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil => ../../internal/aws/ecsutil - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver => ../../receiver/rabbitmqreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver => ../../receiver/elasticsearchreceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver => ../../receiver/envoyalsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor => ../../processor/metricsgenerationprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor => ../../processor/attributesprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver => ../../receiver/sqlqueryreceiver diff --git a/cmd/oteltestbedcol/builder-config.yaml b/cmd/oteltestbedcol/builder-config.yaml index 10bde53fda93..8bc4403afd30 100644 --- a/cmd/oteltestbedcol/builder-config.yaml +++ b/cmd/oteltestbedcol/builder-config.yaml @@ -8,6 +8,7 @@ dist: name: oteltestbedcol description: OpenTelemetry Collector binary for testbed only tests. 
version: 0.118.0-dev + output_path: ./cmd/oteltestbedcol extensions: - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.1-0.20250121185328-fbefb22cc2b3 diff --git a/cmd/telemetrygen/internal/logs/worker.go b/cmd/telemetrygen/internal/logs/worker.go index d259e0abc8a1..26de676e8efe 100644 --- a/cmd/telemetrygen/internal/logs/worker.go +++ b/cmd/telemetrygen/internal/logs/worker.go @@ -58,13 +58,11 @@ func (w worker) simulateLogs(res *resource.Resource, exporterFunc func() (sdklog if w.spanID != "" { // we checked this for errors in the Validate function - // nolint: errcheck b, _ := hex.DecodeString(w.spanID) sid = trace.SpanID(b) } if w.traceID != "" { // we checked this for errors in the Validate function - // nolint: errcheck b, _ := hex.DecodeString(w.traceID) tid = trace.TraceID(b) } diff --git a/cmd/telemetrygen/internal/metrics/metrics.go b/cmd/telemetrygen/internal/metrics/metrics.go index baed4c4b5362..ec05571412e5 100644 --- a/cmd/telemetrygen/internal/metrics/metrics.go +++ b/cmd/telemetrygen/internal/metrics/metrics.go @@ -128,14 +128,12 @@ func exemplarsFromConfig(c *Config) []metricdata.Exemplar[int64] { if c.TraceID != "" { // we validated this already during the Validate() function for config - // nolint: errcheck traceID, _ := hex.DecodeString(c.TraceID) exemplar.TraceID = traceID } if c.SpanID != "" { // we validated this already during the Validate() function for config - // nolint: errcheck spanID, _ := hex.DecodeString(c.SpanID) exemplar.SpanID = spanID } diff --git a/connector/datadogconnector/example_test.go b/connector/datadogconnector/example_test.go index b9c297f5c559..f32d5a16b48d 100644 --- a/connector/datadogconnector/example_test.go +++ b/connector/datadogconnector/example_test.go @@ -25,8 +25,6 @@ func TestExamples(t *testing.T) { t.Setenv("DD_API_KEY", "aaaaaaaaa") factories := newTestComponents(t) const configFile = "./examples/config.yaml" - // 
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck _, err := otelcoltest.LoadConfigAndValidate(configFile, factories) require.NoError(t, err, "All yaml config must validate. Please ensure that all necessary component factories are added in newTestComponents()") } diff --git a/connector/servicegraphconnector/config_test.go b/connector/servicegraphconnector/config_test.go index 1b8ee6d12ef0..58513942a9af 100644 --- a/connector/servicegraphconnector/config_test.go +++ b/connector/servicegraphconnector/config_test.go @@ -22,8 +22,6 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, err) factories.Connectors[metadata.Type] = NewFactory() - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "service-graph-connector-config.yaml"), factories) // Verify diff --git a/connector/servicegraphconnector/internal/store/store.go b/connector/servicegraphconnector/internal/store/store.go index 3c095703984d..f1266c7bb885 100644 --- a/connector/servicegraphconnector/internal/store/store.go +++ b/connector/servicegraphconnector/internal/store/store.go @@ -110,7 +110,7 @@ func (s *Store) Expire() { defer s.mtx.Unlock() // Iterates until no more items can be evicted - for s.tryEvictHead() { // nolint + for s.tryEvictHead() { } } diff --git a/exporter/awskinesisexporter/internal/batch/batch.go b/exporter/awskinesisexporter/internal/batch/batch.go index 7a13754134ff..272f0e4cce88 100644 --- a/exporter/awskinesisexporter/internal/batch/batch.go +++ b/exporter/awskinesisexporter/internal/batch/batch.go @@ -7,7 +7,7 @@ import ( "errors" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/kinesis/types" //nolint:staticcheck // Some encoding types uses legacy prototype version + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "go.opentelemetry.io/collector/consumer/consumererror" 
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awskinesisexporter/internal/compress" diff --git a/exporter/awss3exporter/config_test.go b/exporter/awss3exporter/config_test.go index ec72b167d3eb..9124bda86785 100644 --- a/exporter/awss3exporter/config_test.go +++ b/exporter/awss3exporter/config_test.go @@ -24,8 +24,6 @@ func TestLoadConfig(t *testing.T) { factory := NewFactory() factories.Exporters[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "default.yaml"), factories) require.NoError(t, err) @@ -57,8 +55,6 @@ func TestConfig(t *testing.T) { factory := NewFactory() factories.Exporters[factory.Type()] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate( filepath.Join("testdata", "config.yaml"), factories) @@ -93,8 +89,6 @@ func TestConfigForS3CompatibleSystems(t *testing.T) { factory := NewFactory() factories.Exporters[factory.Type()] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate( filepath.Join("testdata", "config-s3-compatible-systems.yaml"), factories) @@ -209,8 +203,6 @@ func TestMarshallerName(t *testing.T) { factory := NewFactory() factories.Exporters[factory.Type()] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate( filepath.Join("testdata", "marshaler.yaml"), factories) @@ -253,8 +245,6 @@ func TestCompressionName(t *testing.T) { factory := NewFactory() factories.Exporters[factory.Type()] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := 
otelcoltest.LoadConfigAndValidate( filepath.Join("testdata", "compression.yaml"), factories) diff --git a/exporter/awss3exporter/internal/upload/writer.go b/exporter/awss3exporter/internal/upload/writer.go index d65544e5b4f4..ef4c8c9f2227 100644 --- a/exporter/awss3exporter/internal/upload/writer.go +++ b/exporter/awss3exporter/internal/upload/writer.go @@ -63,7 +63,7 @@ func (sw *s3manager) Upload(ctx context.Context, data []byte) error { } func (sw *s3manager) contentBuffer(raw []byte) (*bytes.Buffer, error) { - //nolint: gocritic // Leaving this as a switch statement to make it easier to add more later compressions + //nolint:gocritic // Leaving this as a switch statement to make it easier to add more later compressions switch sw.builder.Compression { case configcompression.TypeGzip: content := bytes.NewBuffer(nil) diff --git a/exporter/clickhouseexporter/exporter_metrics_test.go b/exporter/clickhouseexporter/exporter_metrics_test.go index b9b8255bf355..f875752e69e9 100644 --- a/exporter/clickhouseexporter/exporter_metrics_test.go +++ b/exporter/clickhouseexporter/exporter_metrics_test.go @@ -518,7 +518,6 @@ func mustPushMetricsData(t *testing.T, exporter *metricsExporter, md pmetric.Met require.NoError(t, err) } -// nolint:unparam // not need to check this func func newTestMetricsExporter(t *testing.T, dsn string, fns ...func(*Config)) *metricsExporter { exporter, err := newMetricsExporter(zaptest.NewLogger(t), withTestExporterConfig(fns...)(dsn)) require.NoError(t, err) diff --git a/exporter/datadogexporter/examples_test.go b/exporter/datadogexporter/examples_test.go index bcfb3ebc7f8b..5db7e56c0cf9 100644 --- a/exporter/datadogexporter/examples_test.go +++ b/exporter/datadogexporter/examples_test.go @@ -55,8 +55,6 @@ func TestExamples(t *testing.T) { t.Run(filepath.Base(f.Name()), func(t *testing.T) { t.Setenv("DD_API_KEY", "aaaaaaaaa") name := filepath.Join(folder, f.Name()) - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 
- // nolint:staticcheck _, err := otelcoltest.LoadConfigAndValidate(name, factories) require.NoError(t, err, "All yaml config must validate. Please ensure that all necessary component factories are added in newTestComponents()") }) @@ -85,8 +83,6 @@ func TestExamples(t *testing.T) { require.Len(t, data, n) require.NoError(t, f.Close()) defer os.RemoveAll(f.Name()) - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck _, err = otelcoltest.LoadConfigAndValidate(f.Name(), factories) require.NoError(t, err, "All yaml config must validate. Please ensure that all necessary component factories are added in newTestComponents()") }) diff --git a/exporter/datadogexporter/integrationtest/integration_test.go b/exporter/datadogexporter/integrationtest/integration_test.go index 7d04569e859e..c3adaf12d232 100644 --- a/exporter/datadogexporter/integrationtest/integration_test.go +++ b/exporter/datadogexporter/integrationtest/integration_test.go @@ -94,6 +94,9 @@ func testIntegration(t *testing.T) { defer server.Close() t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -106,7 +109,7 @@ func testIntegration(t *testing.T) { waitForReadiness(app) // 3. Generate and send traces - sendTraces(t) + sendTraces(t, otlpGRPCEndpoint) // 4. 
Validate traces and APM stats from the mock server var spans []*pb.Span @@ -188,8 +191,6 @@ func getIntegrationTestComponents(t *testing.T) otelcol.Factories { } func getIntegrationTestCollector(t *testing.T, cfgFile string, factories otelcol.Factories) *otelcol.Collector { - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck _, err := otelcoltest.LoadConfigAndValidate(cfgFile, factories) require.NoError(t, err, "All yaml config must be valid.") @@ -225,11 +226,11 @@ func waitForReadiness(app *otelcol.Collector) { } } -func sendTraces(t *testing.T) { +func sendTraces(t *testing.T, endpoint string) { ctx := context.Background() // Set up OTel-Go SDK and exporter - traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure()) + traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithEndpoint(endpoint)) require.NoError(t, err) bsp := sdktrace.NewBatchSpanProcessor(traceExporter) r1, _ := resource.New(ctx, resource.WithAttributes(attribute.String("k8s.node.name", "aaaa"))) @@ -287,6 +288,9 @@ func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { defer server.Close() t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -299,7 +303,7 @@ func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { waitForReadiness(app) // 3. Generate and send traces - sendTracesComputeTopLevelBySpanKind(t) + sendTracesComputeTopLevelBySpanKind(t, otlpGRPCEndpoint) // 4. 
Validate traces and APM stats from the mock server var spans []*pb.Span @@ -387,11 +391,11 @@ func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { } } -func sendTracesComputeTopLevelBySpanKind(t *testing.T) { +func sendTracesComputeTopLevelBySpanKind(t *testing.T, endpoint string) { ctx := context.Background() // Set up OTel-Go SDK and exporter - traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure()) + traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithEndpoint(endpoint)) require.NoError(t, err) bsp := sdktrace.NewBatchSpanProcessor(traceExporter) r1, _ := resource.New(ctx, resource.WithAttributes(attribute.String("k8s.node.name", "aaaa"))) @@ -469,6 +473,9 @@ func TestIntegrationLogs(t *testing.T) { thing := commonTestutil.GetAvailableLocalAddress(t) t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", thing) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -481,7 +488,7 @@ func TestIntegrationLogs(t *testing.T) { waitForReadiness(app) // 3. Generate and send logs - sendLogs(t, 5) + sendLogs(t, 5, otlpGRPCEndpoint) // 4. Validate logs and metrics from the mock server // Wait until `doneChannel` is closed and prometheus metrics are received. 
@@ -525,9 +532,9 @@ func TestIntegrationLogs(t *testing.T) { assert.Equal(t, 2, numSentLogRecords) } -func sendLogs(t *testing.T, numLogs int) { +func sendLogs(t *testing.T, numLogs int, endpoint string) { ctx := context.Background() - logExporter, err := otlploggrpc.New(ctx, otlploggrpc.WithInsecure()) + logExporter, err := otlploggrpc.New(ctx, otlploggrpc.WithInsecure(), otlploggrpc.WithEndpoint(endpoint)) assert.NoError(t, err) lr := make([]log.Record, numLogs) assert.NoError(t, logExporter.Export(ctx, lr)) diff --git a/exporter/datadogexporter/integrationtest/integration_test_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_config.yaml index c32bf27da49e..b29a7ebf4f83 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} processors: tail_sampling: diff --git a/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml index 47a7115436dd..1d1e6762b998 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_internal_metrics_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} prometheus: config: scrape_configs: diff --git a/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml index 28ac1ff0789b..ae2af2358967 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml 
+++ b/exporter/datadogexporter/integrationtest/integration_test_logs_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} prometheus: config: scrape_configs: diff --git a/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml b/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml index e542b95d3626..2a997226be99 100644 --- a/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml +++ b/exporter/datadogexporter/integrationtest/integration_test_toplevel_config.yaml @@ -3,9 +3,9 @@ receivers: otlp: protocols: http: - endpoint: "localhost:4318" + endpoint: ${env:OTLP_HTTP_SERVER} grpc: - endpoint: "localhost:4317" + endpoint: ${env:OTLP_GRPC_SERVER} connectors: datadog/connector: diff --git a/exporter/datadogexporter/integrationtest/no_race_integration_test.go b/exporter/datadogexporter/integrationtest/no_race_integration_test.go index 7c7bb38651b2..82ec0724ff9e 100644 --- a/exporter/datadogexporter/integrationtest/no_race_integration_test.go +++ b/exporter/datadogexporter/integrationtest/no_race_integration_test.go @@ -29,6 +29,9 @@ func TestIntegrationInternalMetrics(t *testing.T) { defer server.Close() t.Setenv("SERVER_URL", server.URL) t.Setenv("PROM_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + t.Setenv("OTLP_HTTP_SERVER", commonTestutil.GetAvailableLocalAddress(t)) + otlpGRPCEndpoint := commonTestutil.GetAvailableLocalAddress(t) + t.Setenv("OTLP_GRPC_SERVER", otlpGRPCEndpoint) // 2. Start in-process collector factories := getIntegrationTestComponents(t) @@ -41,7 +44,7 @@ func TestIntegrationInternalMetrics(t *testing.T) { waitForReadiness(app) // 3. Generate and send traces - sendTraces(t) + sendTraces(t, otlpGRPCEndpoint) // 4. 
Validate Datadog trace agent & OTel internal metrics are sent to the mock server expectedMetrics := map[string]struct{}{ diff --git a/exporter/datadogexporter/internal/metrics/consumer.go b/exporter/datadogexporter/internal/metrics/consumer.go index 2d43e604ea5d..4470663fa4b5 100644 --- a/exporter/datadogexporter/internal/metrics/consumer.go +++ b/exporter/datadogexporter/internal/metrics/consumer.go @@ -70,7 +70,7 @@ func (c *Consumer) runningMetrics(timestamp uint64, buildInfo component.BuildInf } for _, lang := range metadata.Languages { - tags := append(buildTags, "language:"+lang) // nolint + tags := append(buildTags, "language:"+lang) //nolint:gocritic runningMetric := DefaultMetrics("runtime_metrics", "", timestamp, tags) series = append(series, runningMetric...) } diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index aa8bb6518ccd..d78d5cba8421 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -142,6 +142,9 @@ This can be customised through the following settings: - `prefix_separator`(default=`-`): Set a separator between logstash_prefix and date. - `date_format`(default=`%Y.%m.%d`): Time format (based on strftime) to generate the second part of the Index name. +- `logs_dynamic_id` (optional): Dynamically determines the document ID to be used in Elasticsearch based on a log record attribute. + - `enabled`(default=false): Enable/Disable dynamic ID for log records. If `elasticsearch.document_id` exists and is not an empty string in the log record attributes, it will be used as the document ID. Otherwise, the document ID will be generated by Elasticsearch. The attribute `elasticsearch.document_id` is removed from the final document. 
+ ### Elasticsearch document mapping The Elasticsearch exporter supports several document schemas and preprocessing diff --git a/exporter/elasticsearchexporter/bulkindexer.go b/exporter/elasticsearchexporter/bulkindexer.go index 2200216be4ef..ded879d3a036 100644 --- a/exporter/elasticsearchexporter/bulkindexer.go +++ b/exporter/elasticsearchexporter/bulkindexer.go @@ -31,7 +31,7 @@ type bulkIndexer interface { type bulkIndexerSession interface { // Add adds a document to the bulk indexing session. - Add(ctx context.Context, index string, document io.WriterTo, dynamicTemplates map[string]string) error + Add(ctx context.Context, index string, docID string, document io.WriterTo, dynamicTemplates map[string]string) error // End must be called on the session object once it is no longer // needed, in order to release any associated resources. @@ -126,8 +126,9 @@ type syncBulkIndexerSession struct { } // Add adds an item to the sync bulk indexer session. -func (s *syncBulkIndexerSession) Add(ctx context.Context, index string, document io.WriterTo, dynamicTemplates map[string]string) error { - err := s.bi.Add(docappender.BulkIndexerItem{Index: index, Body: document, DynamicTemplates: dynamicTemplates}) +func (s *syncBulkIndexerSession) Add(ctx context.Context, index string, docID string, document io.WriterTo, dynamicTemplates map[string]string) error { + doc := docappender.BulkIndexerItem{Index: index, Body: document, DocumentID: docID, DynamicTemplates: dynamicTemplates} + err := s.bi.Add(doc) if err != nil { return err } @@ -248,10 +249,11 @@ func (a *asyncBulkIndexer) Close(ctx context.Context) error { // Add adds an item to the async bulk indexer session. // // Adding an item after a call to Close() will panic. 
-func (s asyncBulkIndexerSession) Add(ctx context.Context, index string, document io.WriterTo, dynamicTemplates map[string]string) error { +func (s asyncBulkIndexerSession) Add(ctx context.Context, index string, docID string, document io.WriterTo, dynamicTemplates map[string]string) error { item := docappender.BulkIndexerItem{ Index: index, Body: document, + DocumentID: docID, DynamicTemplates: dynamicTemplates, } select { diff --git a/exporter/elasticsearchexporter/bulkindexer_test.go b/exporter/elasticsearchexporter/bulkindexer_test.go index 2b3d86a30128..9f2139e83710 100644 --- a/exporter/elasticsearchexporter/bulkindexer_test.go +++ b/exporter/elasticsearchexporter/bulkindexer_test.go @@ -102,7 +102,7 @@ func TestAsyncBulkIndexer_flush(t *testing.T) { session, err := bulkIndexer.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) // should flush time.Sleep(100 * time.Millisecond) assert.Equal(t, int64(1), bulkIndexer.stats.docsIndexed.Load()) @@ -229,7 +229,7 @@ func TestAsyncBulkIndexer_flush_error(t *testing.T) { session, err := bulkIndexer.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) // should flush time.Sleep(100 * time.Millisecond) assert.Equal(t, int64(0), bulkIndexer.stats.docsIndexed.Load()) @@ -312,7 +312,7 @@ func runBulkIndexerOnce(t *testing.T, config *Config, client *elasticsearch.Clie session, err := bulkIndexer.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, 
session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) assert.NoError(t, bulkIndexer.Close(context.Background())) return bulkIndexer @@ -338,7 +338,7 @@ func TestSyncBulkIndexer_flushBytes(t *testing.T) { session, err := bi.StartSession(context.Background()) require.NoError(t, err) - assert.NoError(t, session.Add(context.Background(), "foo", strings.NewReader(`{"foo": "bar"}`), nil)) + assert.NoError(t, session.Add(context.Background(), "foo", "", strings.NewReader(`{"foo": "bar"}`), nil)) assert.Equal(t, int64(1), reqCnt.Load()) // flush due to flush::bytes assert.NoError(t, bi.Close(context.Background())) } diff --git a/exporter/elasticsearchexporter/config.go b/exporter/elasticsearchexporter/config.go index 40acdaf99497..bd246a398b8b 100644 --- a/exporter/elasticsearchexporter/config.go +++ b/exporter/elasticsearchexporter/config.go @@ -53,6 +53,9 @@ type Config struct { // fall back to pure TracesIndex, if 'elasticsearch.index.prefix' or 'elasticsearch.index.suffix' are not found in resource or attribute (prio: resource > attribute) TracesDynamicIndex DynamicIndexSetting `mapstructure:"traces_dynamic_index"` + // LogsDynamicID configures whether log record attribute `elasticsearch.document_id` is set as the document ID in ES. + LogsDynamicID DynamicIDSettings `mapstructure:"logs_dynamic_id"` + // Pipeline configures the ingest node pipeline name that should be used to process the // events. // @@ -112,6 +115,10 @@ type DynamicIndexSetting struct { Enabled bool `mapstructure:"enabled"` } +type DynamicIDSettings struct { + Enabled bool `mapstructure:"enabled"` +} + // AuthenticationSettings defines user authentication related settings. type AuthenticationSettings struct { // User is used to configure HTTP Basic Authentication. 
diff --git a/exporter/elasticsearchexporter/config_test.go b/exporter/elasticsearchexporter/config_test.go index 153001b149e2..51d3955ebbd7 100644 --- a/exporter/elasticsearchexporter/config_test.go +++ b/exporter/elasticsearchexporter/config_test.go @@ -73,6 +73,9 @@ func TestConfig(t *testing.T) { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Pipeline: "mypipeline", ClientConfig: withDefaultHTTPClientConfig(func(cfg *confighttp.ClientConfig) { cfg.Timeout = 2 * time.Minute @@ -144,6 +147,9 @@ func TestConfig(t *testing.T) { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Pipeline: "mypipeline", ClientConfig: withDefaultHTTPClientConfig(func(cfg *confighttp.ClientConfig) { cfg.Timeout = 2 * time.Minute @@ -215,6 +221,9 @@ func TestConfig(t *testing.T) { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Pipeline: "mypipeline", ClientConfig: withDefaultHTTPClientConfig(func(cfg *confighttp.ClientConfig) { cfg.Timeout = 2 * time.Minute diff --git a/exporter/elasticsearchexporter/exporter.go b/exporter/elasticsearchexporter/exporter.go index b13d1336b94d..52bb13a599e3 100644 --- a/exporter/elasticsearchexporter/exporter.go +++ b/exporter/elasticsearchexporter/exporter.go @@ -22,6 +22,11 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/pool" ) +const ( + // documentIDAttributeName is the attribute name used to specify the document ID. 
+ documentIDAttributeName = "elasticsearch.document_id" +) + type elasticsearchExporter struct { component.TelemetrySettings userAgent string @@ -176,13 +181,15 @@ func (e *elasticsearchExporter) pushLogRecord( } buf := e.bufferPool.NewPooledBuffer() + docID := e.extractDocumentIDAttribute(record.Attributes()) err := e.model.encodeLog(resource, resourceSchemaURL, record, scope, scopeSchemaURL, fIndex, buf.Buffer) if err != nil { buf.Recycle() return fmt.Errorf("failed to encode log event: %w", err) } + // not recycling after Add returns an error as we don't know if it's already recycled - return bulkIndexerSession.Add(ctx, fIndex.Index, buf, nil) + return bulkIndexerSession.Add(ctx, fIndex.Index, docID, buf, nil) } func (e *elasticsearchExporter) pushMetricsData( @@ -299,7 +306,7 @@ func (e *elasticsearchExporter) pushMetricsData( errs = append(errs, err) continue } - if err := session.Add(ctx, fIndex.Index, buf, dynamicTemplates); err != nil { + if err := session.Add(ctx, fIndex.Index, "", buf, dynamicTemplates); err != nil { // not recycling after Add returns an error as we don't know if it's already recycled if cerr := ctx.Err(); cerr != nil { return cerr @@ -422,7 +429,7 @@ func (e *elasticsearchExporter) pushTraceRecord( return fmt.Errorf("failed to encode trace record: %w", err) } // not recycling after Add returns an error as we don't know if it's already recycled - return bulkIndexerSession.Add(ctx, fIndex.Index, buf, nil) + return bulkIndexerSession.Add(ctx, fIndex.Index, "", buf, nil) } func (e *elasticsearchExporter) pushSpanEvent( @@ -454,5 +461,17 @@ func (e *elasticsearchExporter) pushSpanEvent( return nil } // not recycling after Add returns an error as we don't know if it's already recycled - return bulkIndexerSession.Add(ctx, fIndex.Index, buf, nil) + return bulkIndexerSession.Add(ctx, fIndex.Index, "", buf, nil) +} + +func (e *elasticsearchExporter) extractDocumentIDAttribute(m pcommon.Map) string { + if !e.config.LogsDynamicID.Enabled { + return 
"" + } + + v, ok := m.Get(documentIDAttributeName) + if !ok { + return "" + } + return v.AsString() } diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index b045ccb325d1..74a6ec5dfcfb 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -736,6 +736,82 @@ func TestExporterLogs(t *testing.T) { assert.JSONEq(t, `{"a":"a","a.b":"a.b"}`, gjson.GetBytes(doc, `resource.attributes`).Raw) }) + t.Run("publish logs with dynamic id", func(t *testing.T) { + t.Parallel() + exampleDocID := "abc123" + tableTests := []struct { + name string + expectedDocID string // "" means the _id will not be set + recordAttrs map[string]any + }{ + { + name: "missing document id attribute should not set _id", + expectedDocID: "", + }, + { + name: "empty document id attribute should not set _id", + expectedDocID: "", + recordAttrs: map[string]any{ + documentIDAttributeName: "", + }, + }, + { + name: "record attributes", + expectedDocID: exampleDocID, + recordAttrs: map[string]any{ + documentIDAttributeName: exampleDocID, + }, + }, + } + + cfgs := map[string]func(*Config){ + "async": func(cfg *Config) { + batcherEnabled := false + cfg.Batcher.Enabled = &batcherEnabled + }, + "sync": func(cfg *Config) { + batcherEnabled := true + cfg.Batcher.Enabled = &batcherEnabled + cfg.Batcher.FlushTimeout = 10 * time.Millisecond + }, + } + for _, tt := range tableTests { + for cfgName, cfgFn := range cfgs { + t.Run(tt.name+"/"+cfgName, func(t *testing.T) { + t.Parallel() + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + + if tt.expectedDocID == "" { + assert.NotContains(t, string(docs[0].Action), "_id", "expected _id to not be set") + } else { + assert.Equal(t, tt.expectedDocID, actionJSONToID(t, docs[0].Action), "expected _id to be set") + } + + // Ensure the document id attribute is removed from the final 
document. + assert.NotContains(t, string(docs[0].Document), documentIDAttributeName, "expected document id attribute to be removed") + return itemsAllOK(docs) + }) + + exporter := newTestLogsExporter(t, server.URL, func(cfg *Config) { + cfg.Mapping.Mode = "otel" + cfg.LogsDynamicID.Enabled = true + cfgFn(cfg) + }) + logs := newLogsWithAttributes( + tt.recordAttrs, + map[string]any{}, + map[string]any{}, + ) + logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Body().SetStr("hello world") + mustSendLogs(t, exporter, logs) + + rec.WaitItems(1) + }) + } + } + }) t.Run("otel mode attribute complex value", func(t *testing.T) { rec := newBulkRecorder() server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { @@ -1943,3 +2019,14 @@ func actionJSONToIndex(t *testing.T, actionJSON json.RawMessage) string { require.NoError(t, err) return action.Create.Index } + +func actionJSONToID(t *testing.T, actionJSON json.RawMessage) string { + action := struct { + Create struct { + ID string `json:"_id"` + } `json:"create"` + }{} + err := json.Unmarshal(actionJSON, &action) + require.NoError(t, err) + return action.Create.ID +} diff --git a/exporter/elasticsearchexporter/factory.go b/exporter/elasticsearchexporter/factory.go index 755a4e3d241b..c72ecbfc0fd1 100644 --- a/exporter/elasticsearchexporter/factory.go +++ b/exporter/elasticsearchexporter/factory.go @@ -62,6 +62,9 @@ func createDefaultConfig() component.Config { TracesDynamicIndex: DynamicIndexSetting{ Enabled: false, }, + LogsDynamicID: DynamicIDSettings{ + Enabled: false, + }, Retry: RetrySettings{ Enabled: true, MaxRetries: 0, // default is set in exporter code diff --git a/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go index 31fc33c55c2d..f1912421fc51 100644 --- a/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go +++ 
b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go @@ -69,5 +69,5 @@ func safeUint64ToInt64(v uint64) int64 { if v > math.MaxInt64 { return math.MaxInt64 } - return int64(v) // nolint:goset // overflow checked + return int64(v) //nolint:gosec // overflow checked } diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index 89f1cb1fb68b..3843e9bc0f60 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -775,7 +775,7 @@ func valueHash(h hash.Hash, v pcommon.Value) { h.Write(buf) case pcommon.ValueTypeInt: buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, uint64(v.Int())) // nolint:gosec // Overflow assumed. We prefer having high integers over zero. + binary.LittleEndian.PutUint64(buf, uint64(v.Int())) h.Write(buf) case pcommon.ValueTypeBytes: h.Write(v.Bytes().AsRaw()) @@ -796,5 +796,5 @@ func safeUint64ToInt64(v uint64) int64 { if v > math.MaxInt64 { return math.MaxInt64 } - return int64(v) // nolint:goset // overflow checked + return int64(v) //nolint:gosec // overflow checked } diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index 2975d877cd1a..34315b2f67b0 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -1294,7 +1294,7 @@ func TestEncodeLogBodyMapMode(t *testing.T) { resourceLogs := logs.ResourceLogs().AppendEmpty() scopeLogs := resourceLogs.ScopeLogs().AppendEmpty() logRecords := scopeLogs.LogRecords() - observedTimestamp := pcommon.Timestamp(time.Now().UnixNano()) // nolint:gosec // UnixNano is positive and thus safe to convert to signed integer.
+ observedTimestamp := pcommon.Timestamp(time.Now().UnixNano()) logRecord := logRecords.AppendEmpty() logRecord.SetObservedTimestamp(observedTimestamp) diff --git a/exporter/elasticsearchexporter/pdata_serializer.go b/exporter/elasticsearchexporter/pdata_serializer.go index 9d2fbf82f63c..76eb2a988372 100644 --- a/exporter/elasticsearchexporter/pdata_serializer.go +++ b/exporter/elasticsearchexporter/pdata_serializer.go @@ -298,7 +298,7 @@ func writeAttributes(v *json.Visitor, attributes pcommon.Map, stringifyMapValues _ = v.OnObjectStart(-1, structform.AnyType) attributes.Range(func(k string, val pcommon.Value) bool { switch k { - case dataStreamType, dataStreamDataset, dataStreamNamespace, elasticsearch.MappingHintsAttrKey: + case dataStreamType, dataStreamDataset, dataStreamNamespace, elasticsearch.MappingHintsAttrKey, documentIDAttributeName: return true } if isGeoAttribute(k, val) { diff --git a/exporter/elasticsearchexporter/pdata_serializer_test.go b/exporter/elasticsearchexporter/pdata_serializer_test.go index 6131ebbc6ee1..26d514757fd4 100644 --- a/exporter/elasticsearchexporter/pdata_serializer_test.go +++ b/exporter/elasticsearchexporter/pdata_serializer_test.go @@ -31,6 +31,7 @@ func TestSerializeLog(t *testing.T) { record.Attributes().PutDouble("double", 42.0) record.Attributes().PutInt("int", 42) record.Attributes().PutEmptyBytes("bytes").Append(42) + record.Attributes().PutStr(documentIDAttributeName, "my_id") _ = record.Attributes().PutEmptySlice("slice").FromRaw([]any{42, "foo"}) record.Attributes().PutEmptySlice("map_slice").AppendEmpty().SetEmptyMap().PutStr("foo.bar", "baz") mapAttr := record.Attributes().PutEmptyMap("map") diff --git a/exporter/elasticsearchexporter/utils_test.go b/exporter/elasticsearchexporter/utils_test.go index fc320b36f073..502774bd894b 100644 --- a/exporter/elasticsearchexporter/utils_test.go +++ b/exporter/elasticsearchexporter/utils_test.go @@ -38,12 +38,12 @@ func itemRequestsSortFunc(a, b itemRequest) int { return comp 
} -func assertRecordedItems(t *testing.T, expected []itemRequest, recorder *bulkRecorder, assertOrder bool) { // nolint:unparam +func assertRecordedItems(t *testing.T, expected []itemRequest, recorder *bulkRecorder, assertOrder bool) { //nolint:unparam recorder.WaitItems(len(expected)) assertItemRequests(t, expected, recorder.Items(), assertOrder) } -func assertItemRequests(t *testing.T, expected, actual []itemRequest, assertOrder bool) { // nolint:unparam +func assertItemRequests(t *testing.T, expected, actual []itemRequest, assertOrder bool) { expectedItems := expected actualItems := actual if !assertOrder { diff --git a/exporter/googlemanagedprometheusexporter/config_test.go b/exporter/googlemanagedprometheusexporter/config_test.go index 5fd62114381b..c71109d54ce7 100644 --- a/exporter/googlemanagedprometheusexporter/config_test.go +++ b/exporter/googlemanagedprometheusexporter/config_test.go @@ -25,8 +25,6 @@ func TestLoadConfig(t *testing.T) { factory := NewFactory() factories.Exporters[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) require.NoError(t, err) diff --git a/exporter/loadbalancingexporter/factory_test.go b/exporter/loadbalancingexporter/factory_test.go index b4d3ff103e5a..2f0e3898e36d 100644 --- a/exporter/loadbalancingexporter/factory_test.go +++ b/exporter/loadbalancingexporter/factory_test.go @@ -77,8 +77,6 @@ func TestBuildExporterConfig(t *testing.T) { require.NoError(t, err) factories.Exporters[metadata.Type] = NewFactory() - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "test-build-exporter-config.yaml"), factories) require.NoError(t, err) require.NotNil(t, cfg) diff --git a/exporter/opensearchexporter/internal/objmodel/objmodel.go 
b/exporter/opensearchexporter/internal/objmodel/objmodel.go index 40d944d9f882..74172bbea8b9 100644 --- a/exporter/opensearchexporter/internal/objmodel/objmodel.go +++ b/exporter/opensearchexporter/internal/objmodel/objmodel.go @@ -29,7 +29,7 @@ // Ingest Node is used. But either way, we try to present only well formed // document to OpenSearch. -// nolint:errcheck +//nolint:errcheck package objmodel // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opensearchexporter/internal/objmodel" import ( diff --git a/exporter/prometheusexporter/end_to_end_test.go b/exporter/prometheusexporter/end_to_end_test.go index 87a9e1283c84..780abf9e9955 100644 --- a/exporter/prometheusexporter/end_to_end_test.go +++ b/exporter/prometheusexporter/end_to_end_test.go @@ -158,8 +158,7 @@ func TestEndToEndSummarySupport(t *testing.T) { require.Empty(t, prometheusExporterScrape, "Left-over unmatched Prometheus scrape content: %q\n", prometheusExporterScrape) } -// the following triggers G101: Potential hardcoded credentials -// nolint:gosec +//nolint:gosec // the following triggers G101: Potential hardcoded credentials const dropWizardResponse = ` # HELP jvm_memory_pool_bytes_used Used bytes of a given JVM memory pool. 
# TYPE jvm_memory_pool_bytes_used gauge diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index 6ad7d31981c0..6829bb57201c 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -353,7 +353,10 @@ func (prwe *prwExporter) execute(ctx context.Context, writeReq *prompb.WriteRequ if err != nil { return err } - defer resp.Body.Close() + defer func() { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() // 2xx status code is considered a success // 5xx errors are recoverable and the exporter should retry diff --git a/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go b/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go index bf9fcbd968cc..5369b201e526 100644 --- a/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_concurrency_test.go @@ -8,7 +8,7 @@ import ( "io" "net/http" "net/http/httptest" - "os" + "runtime" "strconv" "sync" "testing" @@ -32,9 +32,6 @@ import ( // Test everything works when there is more than one goroutine calling PushMetrics. // Today we only use 1 worker per exporter, but the intention of this test is to future-proof in case it changes. 
func Test_PushMetricsConcurrent(t *testing.T) { - if os.Getenv("ImageOs") == "win25" && os.Getenv("GITHUB_ACTIONS") == "true" { - t.Skip("Skipping test on Windows 2025 GH runners, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/37104") - } n := 1000 ms := make([]pmetric.Metrics, n) testIDKey := "test_id" @@ -137,15 +134,22 @@ func Test_PushMetricsConcurrent(t *testing.T) { resp, checkRequestErr := http.Get(server.URL) require.NoError(c, checkRequestErr) assert.NoError(c, resp.Body.Close()) - }, 5*time.Second, 100*time.Millisecond) + }, 15*time.Second, 100*time.Millisecond) var wg sync.WaitGroup wg.Add(n) + maxConcurrentGoroutines := runtime.NumCPU() * 4 + semaphore := make(chan struct{}, maxConcurrentGoroutines) for _, m := range ms { + semaphore <- struct{}{} go func() { + defer func() { + <-semaphore + wg.Done() + }() + err := prwe.PushMetrics(ctx, m) assert.NoError(t, err) - wg.Done() }() } wg.Wait() diff --git a/exporter/signalfxexporter/internal/apm/correlations/client_test.go b/exporter/signalfxexporter/internal/apm/correlations/client_test.go index 055f32d14e8c..0612870f7bdf 100644 --- a/exporter/signalfxexporter/internal/apm/correlations/client_test.go +++ b/exporter/signalfxexporter/internal/apm/correlations/client_test.go @@ -30,7 +30,7 @@ var ( deletePathRegexp = regexp.MustCompile(`/v2/apm/correlate/([^/]+)/([^/]+)/([^/]+)/([^/]+)`) // /dimName/dimValue/{service,environment}/value ) -func waitForCors(corCh <-chan *request, count, waitSeconds int) []*request { // nolint: unparam +func waitForCors(corCh <-chan *request, count, waitSeconds int) []*request { //nolint:unparam cors := make([]*request, 0, count) timeout := time.After(time.Duration(waitSeconds) * time.Second) diff --git a/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go b/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go index c486f9f8ce12..9bf88543d2ce 100644 --- a/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go +++ 
b/exporter/signalfxexporter/internal/apm/tracetracker/tracker.go @@ -164,7 +164,7 @@ func (a *ActiveServiceTracker) processEnvironment(res pcommon.Resource, now time if err == nil { a.hostEnvironmentCache.UpdateOrCreate(&CacheKey{value: environment}, now) } - // nolint:errorlint + //nolint:errorlint if maxEntry, ok := err.(*correlations.ErrMaxEntries); ok && maxEntry.MaxEntries > 0 { a.hostEnvironmentCache.SetMaxSize(maxEntry.MaxEntries, now) } @@ -221,7 +221,7 @@ func (a *ActiveServiceTracker) processService(res pcommon.Resource, now time.Tim if err == nil { a.hostServiceCache.UpdateOrCreate(&CacheKey{value: service}, now) } - // nolint:errorlint + //nolint:errorlint if maxEntry, ok := err.(*correlations.ErrMaxEntries); ok && maxEntry.MaxEntries > 0 { a.hostServiceCache.SetMaxSize(maxEntry.MaxEntries, now) } diff --git a/extension/jaegerremotesampling/factory.go b/extension/jaegerremotesampling/factory.go index 6cd5ce005b3f..c513ef234277 100644 --- a/extension/jaegerremotesampling/factory.go +++ b/extension/jaegerremotesampling/factory.go @@ -52,7 +52,7 @@ func logDeprecation(logger *zap.Logger) { }) } -// nolint +//nolint:unused var protoGate = featuregate.GlobalRegistry().MustRegister( "extension.jaegerremotesampling.replaceThriftWithProto", featuregate.StageStable, diff --git a/internal/aws/metrics/metric_calculator_test.go b/internal/aws/metrics/metric_calculator_test.go index 759d15ee4835..a963df91e425 100644 --- a/internal/aws/metrics/metric_calculator_test.go +++ b/internal/aws/metrics/metric_calculator_test.go @@ -276,7 +276,7 @@ func TestSweep(t *testing.T) { assert.LessOrEqual(t, time.Since(sweepTime), mwe.ttl) } require.NoError(t, mwe.Shutdown()) - for range sweepEvent { // nolint + for range sweepEvent { //nolint:revive } assert.True(t, closed.Load(), "Sweeper did not terminate.") } diff --git a/internal/otelarrow/test/e2e_test.go b/internal/otelarrow/test/e2e_test.go index 9c5f75d64da0..5266e4cb1ce0 100644 --- a/internal/otelarrow/test/e2e_test.go +++ 
b/internal/otelarrow/test/e2e_test.go @@ -281,7 +281,7 @@ func makeTestTraces(i int) ptrace.Traces { func bulkyGenFunc() MkGen { return func() GenFunc { - entropy := datagen.NewTestEntropy(int64(rand.Uint64())) //nolint:gosec // only used for testing + entropy := datagen.NewTestEntropy(int64(rand.Uint64())) tracesGen := datagen.NewTracesGenerator( entropy, @@ -627,7 +627,7 @@ func nearLimitGenFunc() MkGen { const hardLimit = 1 << 20 // 1 MiB return func() GenFunc { - entropy := datagen.NewTestEntropy(int64(rand.Uint64())) //nolint:gosec // only used for testing + entropy := datagen.NewTestEntropy(int64(rand.Uint64())) tracesGen := datagen.NewTracesGenerator( entropy, diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 9a124f065083..d380c9f989dc 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -14,6 +14,7 @@ require ( go.opentelemetry.io/build-tools/checkfile v0.16.0 go.opentelemetry.io/build-tools/chloggen v0.16.0 go.opentelemetry.io/build-tools/crosslink v0.16.0 + go.opentelemetry.io/build-tools/githubgen v0.17.0 go.opentelemetry.io/build-tools/issuegenerator v0.16.0 go.opentelemetry.io/build-tools/multimod v0.16.0 go.opentelemetry.io/collector/cmd/builder v0.118.1-0.20250121185328-fbefb22cc2b3 @@ -107,7 +108,8 @@ require ( github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-github v17.0.0+incompatible // indirect - github.com/google/go-querystring v1.0.0 // indirect + github.com/google/go-github/v66 v66.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect diff --git a/internal/tools/go.sum b/internal/tools/go.sum index 67cf5bc624cc..4756920ab426 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -227,8 +227,10 @@ 
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= +github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= @@ -572,6 +574,8 @@ go.opentelemetry.io/build-tools/chloggen v0.16.0 h1:iuotHxlcK46JJtQLdwQPsC5dcAyg go.opentelemetry.io/build-tools/chloggen v0.16.0/go.mod h1:Wk92v9Wsv36sXYi7hOg3ndeeLKmKBu0/kgB7wcaeqJg= go.opentelemetry.io/build-tools/crosslink v0.16.0 h1:7Y5QPt5TR3qpiW5bwIOnsCJdt1yjZSFDPKtjt2g2zHw= go.opentelemetry.io/build-tools/crosslink v0.16.0/go.mod h1:xogE6iWmt53bsDazb81dQrZw9TQ30+9hc4D8QfVG9aA= +go.opentelemetry.io/build-tools/githubgen v0.17.0 h1:2DFeHglGmrtTMoRDfJoR+iJd/I/3eCm8FtQJpZAb3W8= +go.opentelemetry.io/build-tools/githubgen v0.17.0/go.mod h1:UBpPXtso7exy3VU5EH1ZFfSkYQANJWO/u1lO50qdKkE= go.opentelemetry.io/build-tools/issuegenerator v0.16.0 h1:Ka14LdI0suh63HR25gy0deuspgBThl/z95e/LIIaiRw= go.opentelemetry.io/build-tools/issuegenerator 
v0.16.0/go.mod h1:JfdlpwSKBYbwdTOdGGlCRw4vptEgnYPhApnACBps8N8= go.opentelemetry.io/build-tools/multimod v0.16.0 h1:o205mGH61VXebXeiIPXwxeHRCm332TZqRb8gmypI6po= diff --git a/internal/tools/tools.go b/internal/tools/tools.go index 2dcc07248445..6d75302b13b8 100644 --- a/internal/tools/tools.go +++ b/internal/tools/tools.go @@ -22,6 +22,7 @@ import ( _ "go.opentelemetry.io/build-tools/checkfile" _ "go.opentelemetry.io/build-tools/chloggen" _ "go.opentelemetry.io/build-tools/crosslink" + _ "go.opentelemetry.io/build-tools/githubgen" _ "go.opentelemetry.io/build-tools/issuegenerator" _ "go.opentelemetry.io/build-tools/multimod" _ "go.opentelemetry.io/collector/cmd/builder" diff --git a/pkg/experimentalmetricmetadata/metadata.go b/pkg/experimentalmetricmetadata/metadata.go index 3cbf15df4515..242170c222fa 100644 --- a/pkg/experimentalmetricmetadata/metadata.go +++ b/pkg/experimentalmetricmetadata/metadata.go @@ -6,7 +6,7 @@ package experimentalmetricmetadata // import "github.com/open-telemetry/opentele // MetadataExporter provides an interface to implement // ConsumeMetadata in Exporters that support metadata. // Type, functionality, and interface not guaranteed to be stable or permanent. -type MetadataExporter interface { //nolint +type MetadataExporter interface { // ConsumeMetadata will be invoked every time there's an // update to a resource that results in one or more MetadataUpdate. ConsumeMetadata(metadata []*MetadataUpdate) error @@ -34,7 +34,7 @@ type ResourceID string // Apart from Kubernetes labels, the other metadata collected by this // receiver are also handled in the same manner. // Type, functionality, and fields not guaranteed to be stable or permanent. -type MetadataDelta struct { //nolint +type MetadataDelta struct { // MetadataToAdd contains key-value pairs that are newly added to // the resource description in the current revision. 
MetadataToAdd map[string]string @@ -49,7 +49,7 @@ type MetadataDelta struct { //nolint // MetadataUpdate provides a delta view of metadata on a resource between // two revisions of a resource. // Type, functionality, and fields not guaranteed to be stable or permanent. -type MetadataUpdate struct { //nolint +type MetadataUpdate struct { // ResourceIDKey is the label key of UID label for the resource. ResourceIDKey string // ResourceID is the Kubernetes UID of the resource. In case of diff --git a/pkg/ottl/expression_test.go b/pkg/ottl/expression_test.go index e5cffbfcd547..b215a79e9e07 100644 --- a/pkg/ottl/expression_test.go +++ b/pkg/ottl/expression_test.go @@ -1050,7 +1050,7 @@ func Test_FunctionGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardStringGetter_WrappedError(t *testing.T) { getter := StandardStringGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -1197,7 +1197,7 @@ func Test_StandardStringLikeGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardStringLikeGetter_WrappedError(t *testing.T) { getter := StandardStringLikeGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -1274,7 +1274,7 @@ func Test_StandardFloatGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardFloatGetter_WrappedError(t *testing.T) { getter := StandardFloatGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -1451,7 +1451,7 @@ func Test_StandardFloatLikeGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardFloatLikeGetter_WrappedError(t *testing.T) { getter := StandardFloatLikeGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -1528,7 +1528,7 @@ func Test_StandardIntGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardIntGetter_WrappedError(t *testing.T) { getter := StandardIntGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ 
-1705,7 +1705,7 @@ func Test_StandardIntLikeGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardIntLikeGetter_WrappedError(t *testing.T) { getter := StandardIntLikeGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -1904,7 +1904,7 @@ func Test_StandardByteSliceLikeGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardByteSliceLikeGetter_WrappedError(t *testing.T) { getter := StandardByteSliceLikeGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -1981,7 +1981,7 @@ func Test_StandardBoolGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardBoolGetter_WrappedError(t *testing.T) { getter := StandardBoolGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -2137,7 +2137,7 @@ func Test_StandardBoolLikeGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardBoolLikeGetter_WrappedError(t *testing.T) { getter := StandardBoolLikeGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -2224,7 +2224,7 @@ func Test_StandardPMapGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardPMapGetter_WrappedError(t *testing.T) { getter := StandardPMapGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -2329,7 +2329,7 @@ func Test_StandardDurationGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardDurationGetter_WrappedError(t *testing.T) { getter := StandardDurationGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { @@ -2418,7 +2418,7 @@ func Test_StandardTimeGetter(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_StandardTimeGetter_WrappedError(t *testing.T) { getter := StandardTimeGetter[any]{ Getter: func(_ context.Context, _ any) (any, error) { diff --git a/pkg/ottl/factory.go b/pkg/ottl/factory.go index f4ff94f1872d..849ac68d7847 100644 --- a/pkg/ottl/factory.go +++ 
b/pkg/ottl/factory.go @@ -41,7 +41,7 @@ type factory[K any] struct { createFunctionFunc CreateFunctionFunc[K] } -// nolint:unused +//nolint:unused func (f *factory[K]) unexportedFactoryFunc() {} func (f *factory[K]) Name() string { diff --git a/pkg/ottl/functions.go b/pkg/ottl/functions.go index 5740328fa1c2..3703be6a422a 100644 --- a/pkg/ottl/functions.go +++ b/pkg/ottl/functions.go @@ -697,7 +697,6 @@ type Optional[T any] struct { } // This is called only by reflection. -// nolint:unused func (o Optional[T]) set(val any) reflect.Value { return reflect.ValueOf(Optional[T]{ val: val.(T), diff --git a/pkg/ottl/ottlfuncs/func_is_bool.go b/pkg/ottl/ottlfuncs/func_is_bool.go index b2845e919c33..c39752de8424 100644 --- a/pkg/ottl/ottlfuncs/func_is_bool.go +++ b/pkg/ottl/ottlfuncs/func_is_bool.go @@ -28,7 +28,7 @@ func createIsBoolFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) ( return isBool(args.Target), nil } -// nolint:errorlint +//nolint:errorlint func isBool[K any](target ottl.BoolGetter[K]) ottl.ExprFunc[K] { return func(ctx context.Context, tCtx K) (any, error) { _, err := target.Get(ctx, tCtx) diff --git a/pkg/ottl/ottlfuncs/func_is_bool_test.go b/pkg/ottl/ottlfuncs/func_is_bool_test.go index 4cb780385923..1e349ac5fbf1 100644 --- a/pkg/ottl/ottlfuncs/func_is_bool_test.go +++ b/pkg/ottl/ottlfuncs/func_is_bool_test.go @@ -59,7 +59,7 @@ func Test_IsBool(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_IsBool_Error(t *testing.T) { exprFunc := isBool[any](&ottl.StandardBoolGetter[any]{ Getter: func(context.Context, any) (any, error) { diff --git a/pkg/ottl/ottlfuncs/func_is_double.go b/pkg/ottl/ottlfuncs/func_is_double.go index 5ae4f4ac31c4..7ca3a70e4526 100644 --- a/pkg/ottl/ottlfuncs/func_is_double.go +++ b/pkg/ottl/ottlfuncs/func_is_double.go @@ -28,7 +28,7 @@ func createIsDoubleFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) return isDouble(args.Target), nil } -// nolint:errorlint +//nolint:errorlint func 
isDouble[K any](target ottl.FloatGetter[K]) ottl.ExprFunc[K] { return func(ctx context.Context, tCtx K) (any, error) { _, err := target.Get(ctx, tCtx) diff --git a/pkg/ottl/ottlfuncs/func_is_double_test.go b/pkg/ottl/ottlfuncs/func_is_double_test.go index f0e3e7409fc1..3926a5230781 100644 --- a/pkg/ottl/ottlfuncs/func_is_double_test.go +++ b/pkg/ottl/ottlfuncs/func_is_double_test.go @@ -69,7 +69,7 @@ func Test_IsDouble(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_IsDouble_Error(t *testing.T) { exprFunc := isDouble[any](&ottl.StandardFloatGetter[any]{ Getter: func(context.Context, any) (any, error) { diff --git a/pkg/ottl/ottlfuncs/func_is_int.go b/pkg/ottl/ottlfuncs/func_is_int.go index 9b392012243e..6725d5231000 100644 --- a/pkg/ottl/ottlfuncs/func_is_int.go +++ b/pkg/ottl/ottlfuncs/func_is_int.go @@ -28,7 +28,7 @@ func createIsIntFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (o return isInt(args.Target), nil } -// nolint:errorlint +//nolint:errorlint func isInt[K any](target ottl.IntGetter[K]) ottl.ExprFunc[K] { return func(ctx context.Context, tCtx K) (any, error) { _, err := target.Get(ctx, tCtx) diff --git a/pkg/ottl/ottlfuncs/func_is_int_test.go b/pkg/ottl/ottlfuncs/func_is_int_test.go index 18566236e8dc..44b6af7e0044 100644 --- a/pkg/ottl/ottlfuncs/func_is_int_test.go +++ b/pkg/ottl/ottlfuncs/func_is_int_test.go @@ -74,7 +74,7 @@ func Test_IsInt(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_IsInt_Error(t *testing.T) { exprFunc := isInt[any](&ottl.StandardIntGetter[any]{ Getter: func(context.Context, any) (any, error) { diff --git a/pkg/ottl/ottlfuncs/func_is_map.go b/pkg/ottl/ottlfuncs/func_is_map.go index 3e36056b748e..98fb9c1d176a 100644 --- a/pkg/ottl/ottlfuncs/func_is_map.go +++ b/pkg/ottl/ottlfuncs/func_is_map.go @@ -28,7 +28,7 @@ func createIsMapFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (o return isMap(args.Target), nil } -// nolint:errorlint +//nolint:errorlint func 
isMap[K any](target ottl.PMapGetter[K]) ottl.ExprFunc[K] { return func(ctx context.Context, tCtx K) (any, error) { _, err := target.Get(ctx, tCtx) diff --git a/pkg/ottl/ottlfuncs/func_is_map_test.go b/pkg/ottl/ottlfuncs/func_is_map_test.go index 65936196504e..7489401120af 100644 --- a/pkg/ottl/ottlfuncs/func_is_map_test.go +++ b/pkg/ottl/ottlfuncs/func_is_map_test.go @@ -59,7 +59,7 @@ func Test_IsMap(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_IsMap_Error(t *testing.T) { exprFunc := isMap[any](&ottl.StandardPMapGetter[any]{ Getter: func(context.Context, any) (any, error) { diff --git a/pkg/ottl/ottlfuncs/func_is_string.go b/pkg/ottl/ottlfuncs/func_is_string.go index d2e5c00650cf..0103e2773014 100644 --- a/pkg/ottl/ottlfuncs/func_is_string.go +++ b/pkg/ottl/ottlfuncs/func_is_string.go @@ -28,7 +28,7 @@ func createIsStringFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) return isString(args.Target), nil } -// nolint:errorlint +//nolint:errorlint func isString[K any](target ottl.StringGetter[K]) ottl.ExprFunc[K] { return func(ctx context.Context, tCtx K) (any, error) { _, err := target.Get(ctx, tCtx) diff --git a/pkg/ottl/ottlfuncs/func_is_string_test.go b/pkg/ottl/ottlfuncs/func_is_string_test.go index 5f91b7475ed4..99b6039e06c6 100644 --- a/pkg/ottl/ottlfuncs/func_is_string_test.go +++ b/pkg/ottl/ottlfuncs/func_is_string_test.go @@ -59,7 +59,7 @@ func Test_IsString(t *testing.T) { } } -// nolint:errorlint +//nolint:errorlint func Test_IsString_Error(t *testing.T) { exprFunc := isString[any](&ottl.StandardStringGetter[any]{ Getter: func(context.Context, any) (any, error) { diff --git a/pkg/ottl/ottlfuncs/func_len.go b/pkg/ottl/ottlfuncs/func_len.go index d45947cfc4ef..e9c7c912aebb 100644 --- a/pkg/ottl/ottlfuncs/func_len.go +++ b/pkg/ottl/ottlfuncs/func_len.go @@ -38,7 +38,6 @@ func createLenFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ott return computeLen(args.Target), nil } -// nolint:exhaustive func 
computeLen[K any](target ottl.Getter[K]) ottl.ExprFunc[K] { return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) diff --git a/pkg/ottl/ottlfuncs/func_len_test.go b/pkg/ottl/ottlfuncs/func_len_test.go index 5c011dbdd317..db11db23a6cd 100644 --- a/pkg/ottl/ottlfuncs/func_len_test.go +++ b/pkg/ottl/ottlfuncs/func_len_test.go @@ -303,7 +303,7 @@ func dummyMap(size int) map[string]any { return m } -// nolint:errorlint +//nolint:errorlint func Test_Len_Error(t *testing.T) { exprFunc := computeLen[any](&ottl.StandardGetSetter[any]{ Getter: func(context.Context, any) (any, error) { diff --git a/pkg/stanza/adapter/converter.go b/pkg/stanza/adapter/converter.go index a81fd8f00a42..e007ae4b7fda 100644 --- a/pkg/stanza/adapter/converter.go +++ b/pkg/stanza/adapter/converter.go @@ -274,7 +274,7 @@ func HashResource(resource map[string]any) uint64 { case []byte: hw.h.Write(t) //nolint:errcheck case bool, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: - binary.Write(hw.h, binary.BigEndian, t) // nolint - nothing to do about it + binary.Write(hw.h, binary.BigEndian, t) //nolint:errcheck // nothing to do about it default: b, _ := json.Marshal(t) hw.h.Write(b) //nolint:errcheck diff --git a/pkg/stanza/operator/input/udp/input.go b/pkg/stanza/operator/input/udp/input.go index f718e187f15b..83a560aa9e5d 100644 --- a/pkg/stanza/operator/input/udp/input.go +++ b/pkg/stanza/operator/input/udp/input.go @@ -239,7 +239,7 @@ func (i *Input) readMessage(buffer []byte) ([]byte, net.Addr, int, error) { // This will remove trailing characters and NULs from the buffer func (i *Input) removeTrailingCharactersAndNULsFromBuffer(buffer []byte, n int) []byte { // Remove trailing characters and NULs - for ; (n > 0) && (buffer[n-1] < 32); n-- { // nolint + for ; (n > 0) && (buffer[n-1] < 32); n-- { //nolint:revive } return buffer[:n] diff --git a/processor/cumulativetodeltaprocessor/README.md 
b/processor/cumulativetodeltaprocessor/README.md index 6963e47000ae..ed6de34eaf4e 100644 --- a/processor/cumulativetodeltaprocessor/README.md +++ b/processor/cumulativetodeltaprocessor/README.md @@ -23,8 +23,8 @@ Configuration is specified through a list of metrics. The processor uses metric The following settings can be optionally configured: -- `include`: List of metrics names or patterns to convert to delta. -- `exclude`: List of metrics names or patterns to not convert to delta. **If a metric name matches both include and exclude, exclude takes precedence.** +- `include`: List of metric names (case-insensitive), patterns, or metric types to convert to delta. Valid metric types are: `sum`, `histogram`. +- `exclude`: List of metric names (case-insensitive), patterns, or metric types to not convert to delta. **If a metric name matches both include and exclude, exclude takes precedence.** Valid metric types are: `sum`, `histogram`. - `max_staleness`: The total time a state entry will live past the time it was last seen. Set to 0 to retain state indefinitely. Default: 0 - `initial_value`: Handling of the first observed point for a given metric identity. When the collector (re)starts, there's no record of how much of a given cumulative counter has already been converted to delta values. 
@@ -56,6 +56,17 @@ processors: match_type: strict ``` +```yaml +processors: + # processor name: cumulativetodelta + cumulativetodelta: + + # Convert all sum metrics + include: + metric_types: + - sum +``` + ```yaml processors: # processor name: cumulativetodelta @@ -69,6 +80,21 @@ processors: match_type: regexp ``` +```yaml +processors: + # processor name: cumulativetodelta + cumulativetodelta: + + # Convert cumulative sum metrics to delta + # if and only if 'metric' is in the name + include: + metrics: + - ".*metric.*" + match_type: regexp + metric_types: + - sum +``` + ```yaml processors: # processor name: cumulativetodelta @@ -82,6 +108,22 @@ processors: match_type: regexp ``` +```yaml +processors: + # processor name: cumulativetodelta + cumulativetodelta: + + # Convert cumulative sum metrics with 'metric' in their name, + # but exclude histogram metrics + include: + metrics: + - ".*metric.*" + match_type: regexp + exclude: + metric_types: + - histogram +``` + ```yaml processors: # processor name: cumulativetodelta diff --git a/processor/cumulativetodeltaprocessor/config.go b/processor/cumulativetodeltaprocessor/config.go index dcba656c838d..adcc81090f2d 100644 --- a/processor/cumulativetodeltaprocessor/config.go +++ b/processor/cumulativetodeltaprocessor/config.go @@ -5,14 +5,24 @@ package cumulativetodeltaprocessor // import "github.com/open-telemetry/opentele import ( "fmt" + "strings" "time" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pmetric" + "golang.org/x/exp/maps" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor/internal/tracking" ) +var validMetricTypes = map[string]bool{ + strings.ToLower(pmetric.MetricTypeSum.String()): true, + strings.ToLower(pmetric.MetricTypeHistogram.String()): true, +} + +var validMetricTypeList = maps.Keys(validMetricTypes) + // Config defines the configuration 
for the processor. type Config struct { // MaxStaleness is the total time a state entry will live past the time it was last seen. Set to 0 to retain state indefinitely. @@ -37,6 +47,8 @@ type MatchMetrics struct { filterset.Config `mapstructure:",squash"` Metrics []string `mapstructure:"metrics"` + + MetricTypes []string `mapstructure:"metric_types"` } var _ component.Config = (*Config)(nil) @@ -52,5 +64,24 @@ func (config *Config) Validate() error { (len(config.Exclude.MatchType) > 0 && len(config.Exclude.Metrics) == 0) { return fmt.Errorf("metrics must be supplied if match_type is set") } + + for _, metricType := range config.Exclude.MetricTypes { + if valid := validMetricTypes[strings.ToLower(metricType)]; !valid { + return fmt.Errorf( + "found invalid metric type in exclude.metric_types: %s. Valid values are %s", + metricType, + validMetricTypeList, + ) + } + } + for _, metricType := range config.Include.MetricTypes { + if valid := validMetricTypes[strings.ToLower(metricType)]; !valid { + return fmt.Errorf( + "found invalid metric type in include.metric_types: %s. 
Valid values are %s", + metricType, + validMetricTypeList, + ) + } + } return nil } diff --git a/processor/cumulativetodeltaprocessor/config_test.go b/processor/cumulativetodeltaprocessor/config_test.go index 97c3f8952077..337c2fc13c37 100644 --- a/processor/cumulativetodeltaprocessor/config_test.go +++ b/processor/cumulativetodeltaprocessor/config_test.go @@ -4,6 +4,7 @@ package cumulativetodeltaprocessor import ( + "fmt" "path/filepath" "testing" "time" @@ -82,6 +83,45 @@ func TestLoadConfig(t *testing.T) { InitialValue: tracking.InitialValueAuto, }, }, + { + id: component.NewIDWithName(metadata.Type, "metric_type_filter"), + expected: &Config{ + Include: MatchMetrics{ + Metrics: []string{ + "a*", + }, + Config: filterset.Config{ + MatchType: "regexp", + RegexpConfig: nil, + }, + MetricTypes: []string{ + "sum", + }, + }, + Exclude: MatchMetrics{ + Metrics: []string{ + "b*", + }, + Config: filterset.Config{ + MatchType: "regexp", + RegexpConfig: nil, + }, + MetricTypes: []string{ + "histogram", + }, + }, + MaxStaleness: 10 * time.Second, + InitialValue: tracking.InitialValueAuto, + }, + }, + { + id: component.NewIDWithName(metadata.Type, "invalid_include_metric_type_filter"), + errorMessage: fmt.Sprintf("found invalid metric type in include.metric_types: gauge. Valid values are %s", validMetricTypeList), + }, + { + id: component.NewIDWithName(metadata.Type, "invalid_exclude_metric_type_filter"), + errorMessage: fmt.Sprintf("found invalid metric type in exclude.metric_types: Invalid. 
Valid values are %s", validMetricTypeList), + }, { id: component.NewIDWithName(metadata.Type, "missing_match_type"), errorMessage: "match_type must be set if metrics are supplied", diff --git a/processor/cumulativetodeltaprocessor/factory.go b/processor/cumulativetodeltaprocessor/factory.go index 24ffc9c3e334..21a0af4a04b5 100644 --- a/processor/cumulativetodeltaprocessor/factory.go +++ b/processor/cumulativetodeltaprocessor/factory.go @@ -40,7 +40,10 @@ func createMetricsProcessor( return nil, fmt.Errorf("configuration parsing error") } - metricsProcessor := newCumulativeToDeltaProcessor(processorConfig, set.Logger) + metricsProcessor, err := newCumulativeToDeltaProcessor(processorConfig, set.Logger) + if err != nil { + return nil, err + } return processorhelper.NewMetrics( ctx, diff --git a/processor/cumulativetodeltaprocessor/factory_test.go b/processor/cumulativetodeltaprocessor/factory_test.go index 6926b4257d6d..b309bc430396 100644 --- a/processor/cumulativetodeltaprocessor/factory_test.go +++ b/processor/cumulativetodeltaprocessor/factory_test.go @@ -6,6 +6,7 @@ package cumulativetodeltaprocessor import ( "context" "path/filepath" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -61,6 +62,12 @@ func TestCreateProcessors(t *testing.T) { processortest.NewNopSettings(), cfg, consumertest.NewNop()) + + if strings.Contains(k, "invalid") { + assert.Error(t, mErr) + assert.Nil(t, mp) + return + } assert.NotNil(t, mp) assert.NoError(t, mErr) assert.NoError(t, mp.Shutdown(context.Background())) diff --git a/processor/cumulativetodeltaprocessor/go.mod b/processor/cumulativetodeltaprocessor/go.mod index 50c40922c46e..033c5cbbf8ee 100644 --- a/processor/cumulativetodeltaprocessor/go.mod +++ b/processor/cumulativetodeltaprocessor/go.mod @@ -16,6 +16,7 @@ require ( go.opentelemetry.io/collector/processor/processortest v0.118.1-0.20250121185328-fbefb22cc2b3 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 
) require ( diff --git a/processor/cumulativetodeltaprocessor/go.sum b/processor/cumulativetodeltaprocessor/go.sum index 227311851b1e..a80ae09f6ab8 100644 --- a/processor/cumulativetodeltaprocessor/go.sum +++ b/processor/cumulativetodeltaprocessor/go.sum @@ -105,6 +105,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go index 3412efc5849b..8acacab65938 100644 --- a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go +++ b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go @@ -334,7 +334,7 @@ func Test_metricTracker_sweeper(t *testing.T) { assert.LessOrEqual(t, tr.maxStaleness, time.Since(staleBefore.AsTime())) } cancel() - for range sweepEvent { // nolint + for range sweepEvent { //nolint:revive } assert.True(t, closed.Load(), "Sweeper did not terminate.") } diff --git a/processor/cumulativetodeltaprocessor/processor.go b/processor/cumulativetodeltaprocessor/processor.go index 0c7673a9a169..78bfbaf3fd1c 100644 --- 
a/processor/cumulativetodeltaprocessor/processor.go +++ b/processor/cumulativetodeltaprocessor/processor.go @@ -5,7 +5,9 @@ package cumulativetodeltaprocessor // import "github.com/open-telemetry/opentele import ( "context" + "fmt" "math" + "strings" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" @@ -15,19 +17,21 @@ import ( ) type cumulativeToDeltaProcessor struct { - includeFS filterset.FilterSet - excludeFS filterset.FilterSet - logger *zap.Logger - deltaCalculator *tracking.MetricTracker - cancelFunc context.CancelFunc + includeFS filterset.FilterSet + excludeFS filterset.FilterSet + includeMetricTypes map[pmetric.MetricType]bool + excludeMetricTypes map[pmetric.MetricType]bool + logger *zap.Logger + deltaCalculator *tracking.MetricTracker + cancelFunc context.CancelFunc } -func newCumulativeToDeltaProcessor(config *Config, logger *zap.Logger) *cumulativeToDeltaProcessor { +func newCumulativeToDeltaProcessor(config *Config, logger *zap.Logger) (*cumulativeToDeltaProcessor, error) { ctx, cancel := context.WithCancel(context.Background()) + p := &cumulativeToDeltaProcessor{ - logger: logger, - deltaCalculator: tracking.NewMetricTracker(ctx, logger, config.MaxStaleness, config.InitialValue), - cancelFunc: cancel, + logger: logger, + cancelFunc: cancel, } if len(config.Include.Metrics) > 0 { p.includeFS, _ = filterset.CreateFilterSet(config.Include.Metrics, &config.Include.Config) @@ -35,7 +39,41 @@ func newCumulativeToDeltaProcessor(config *Config, logger *zap.Logger) *cumulati if len(config.Exclude.Metrics) > 0 { p.excludeFS, _ = filterset.CreateFilterSet(config.Exclude.Metrics, &config.Exclude.Config) } - return p + + if len(config.Include.MetricTypes) > 0 { + includeMetricTypeFilter, err := getMetricTypeFilter(config.Include.MetricTypes) + if err != nil { + return nil, err + } + p.includeMetricTypes = includeMetricTypeFilter + } + + if len(config.Exclude.MetricTypes) > 0 { + excludeMetricTypeFilter, err := 
getMetricTypeFilter(config.Exclude.MetricTypes) + if err != nil { + return nil, err + } + p.excludeMetricTypes = excludeMetricTypeFilter + } + + p.deltaCalculator = tracking.NewMetricTracker(ctx, logger, config.MaxStaleness, config.InitialValue) + + return p, nil +} + +func getMetricTypeFilter(types []string) (map[pmetric.MetricType]bool, error) { + res := map[pmetric.MetricType]bool{} + for _, t := range types { + switch strings.ToLower(t) { + case strings.ToLower(pmetric.MetricTypeSum.String()): + res[pmetric.MetricTypeSum] = true + case strings.ToLower(pmetric.MetricTypeHistogram.String()): + res[pmetric.MetricTypeHistogram] = true + default: + return nil, fmt.Errorf("unsupported metric type filter: %s", t) + } + } + return res, nil } // processMetrics implements the ProcessMetricsFunc type. @@ -43,7 +81,7 @@ func (ctdp *cumulativeToDeltaProcessor) processMetrics(_ context.Context, md pme md.ResourceMetrics().RemoveIf(func(rm pmetric.ResourceMetrics) bool { rm.ScopeMetrics().RemoveIf(func(ilm pmetric.ScopeMetrics) bool { ilm.Metrics().RemoveIf(func(m pmetric.Metric) bool { - if !ctdp.shouldConvertMetric(m.Name()) { + if !ctdp.shouldConvertMetric(m) { return false } switch m.Type() { @@ -111,9 +149,11 @@ func (ctdp *cumulativeToDeltaProcessor) shutdown(context.Context) error { return nil } -func (ctdp *cumulativeToDeltaProcessor) shouldConvertMetric(metricName string) bool { - return (ctdp.includeFS == nil || ctdp.includeFS.Matches(metricName)) && - (ctdp.excludeFS == nil || !ctdp.excludeFS.Matches(metricName)) +func (ctdp *cumulativeToDeltaProcessor) shouldConvertMetric(metric pmetric.Metric) bool { + return (ctdp.includeFS == nil || ctdp.includeFS.Matches(metric.Name())) && + (len(ctdp.includeMetricTypes) == 0 || ctdp.includeMetricTypes[metric.Type()]) && + (ctdp.excludeFS == nil || !ctdp.excludeFS.Matches(metric.Name())) && + (len(ctdp.excludeMetricTypes) == 0 || !ctdp.excludeMetricTypes[metric.Type()]) } func (ctdp *cumulativeToDeltaProcessor) 
convertNumberDataPoints(dps pmetric.NumberDataPointSlice, baseIdentity tracking.MetricIdentity) { diff --git a/processor/cumulativetodeltaprocessor/processor_test.go b/processor/cumulativetodeltaprocessor/processor_test.go index d7b0a19ab6cc..c36a8f06cb0b 100644 --- a/processor/cumulativetodeltaprocessor/processor_test.go +++ b/processor/cumulativetodeltaprocessor/processor_test.go @@ -5,6 +5,7 @@ package cumulativetodeltaprocessor import ( "context" + "errors" "math" "testing" "time" @@ -35,6 +36,30 @@ type testSumMetric struct { flags [][]pmetric.DataPointFlags } +func (tm testSumMetric) addToMetrics(ms pmetric.MetricSlice, now time.Time) { + for i, name := range tm.metricNames { + m := ms.AppendEmpty() + m.SetName(name) + sum := m.SetEmptySum() + sum.SetIsMonotonic(tm.isMonotonic[i]) + + if tm.isCumulative[i] { + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + } else { + sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + } + + for index, value := range tm.metricValues[i] { + dp := m.Sum().DataPoints().AppendEmpty() + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) + dp.SetDoubleValue(value) + if len(tm.flags) > i && len(tm.flags[i]) > index { + dp.SetFlags(tm.flags[i][index]) + } + } + } +} + type testHistogramMetric struct { metricNames []string metricCounts [][]uint64 @@ -46,12 +71,54 @@ type testHistogramMetric struct { flags [][]pmetric.DataPointFlags } +func (tm testHistogramMetric) addToMetrics(ms pmetric.MetricSlice, now time.Time) { + for i, name := range tm.metricNames { + m := ms.AppendEmpty() + m.SetName(name) + hist := m.SetEmptyHistogram() + + if tm.isCumulative[i] { + hist.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + } else { + hist.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + } + + for index, count := range tm.metricCounts[i] { + dp := m.Histogram().DataPoints().AppendEmpty() + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * 
time.Second))) + dp.SetCount(count) + + sums := tm.metricSums[i] + if len(sums) > 0 { + dp.SetSum(sums[index]) + } + if tm.metricMins != nil { + mins := tm.metricMins[i] + if len(mins) > 0 { + dp.SetMin(mins[index]) + } + } + if tm.metricMaxes != nil { + maxes := tm.metricMaxes[i] + if len(maxes) > 0 { + dp.SetMax(maxes[index]) + } + } + dp.BucketCounts().FromRaw(tm.metricBuckets[i][index]) + if len(tm.flags) > i && len(tm.flags[i]) > index { + dp.SetFlags(tm.flags[i][index]) + } + } + } +} + type cumulativeToDeltaTest struct { name string include MatchMetrics exclude MatchMetrics inMetrics pmetric.Metrics outMetrics pmetric.Metrics + wantError error } func TestCumulativeToDeltaProcessor(t *testing.T) { @@ -436,6 +503,123 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { isMonotonic: []bool{true}, }), }, + { + name: "cumulative_to_delta_exclude_sum_metrics", + include: MatchMetrics{}, + exclude: MatchMetrics{ + MetricTypes: []string{"sum"}, + }, + inMetrics: generateMixedTestMetrics( + testSumMetric{ + metricNames: []string{"metric_1"}, + metricValues: [][]float64{{0, 100, 200, 500}}, + isCumulative: []bool{true, true}, + isMonotonic: []bool{true, true}, + }, + testHistogramMetric{ + metricNames: []string{"metric_2"}, + metricCounts: [][]uint64{{0, 100, 200, 500}}, + metricSums: [][]float64{{0, 100, 200, 500}}, + metricBuckets: [][][]uint64{ + {{0, 0, 0}, {50, 25, 25}, {100, 50, 50}, {250, 125, 125}}, + }, + metricMins: [][]float64{ + {0, 5.0, 2.0, 3.0}, + }, + metricMaxes: [][]float64{ + {0, 800.0, 825.0, 800.0}, + }, + isCumulative: []bool{true}, + }, + ), + outMetrics: generateMixedTestMetrics( + testSumMetric{ + metricNames: []string{"metric_1"}, + metricValues: [][]float64{{0, 100, 200, 500}}, + isCumulative: []bool{true}, + isMonotonic: []bool{true}, + }, + testHistogramMetric{ + metricNames: []string{"metric_2"}, + metricCounts: [][]uint64{{100, 100, 300}}, + metricSums: [][]float64{{100, 100, 300}}, + metricBuckets: [][][]uint64{ + {{50, 25, 25}, {50, 
25, 25}, {150, 75, 75}}, + }, + metricMins: [][]float64{ + nil, + }, + metricMaxes: [][]float64{ + nil, + }, + isCumulative: []bool{false}, + }), + }, + { + name: "cumulative_to_delta_include_histogram_metrics", + include: MatchMetrics{ + MetricTypes: []string{"histogram"}, + }, + inMetrics: generateMixedTestMetrics( + testSumMetric{ + metricNames: []string{"metric_1"}, + metricValues: [][]float64{{0, 100, 200, 500}}, + isCumulative: []bool{true, true}, + isMonotonic: []bool{true, true}, + }, + testHistogramMetric{ + metricNames: []string{"metric_2"}, + metricCounts: [][]uint64{{0, 100, 200, 500}}, + metricSums: [][]float64{{0, 100, 200, 500}}, + metricBuckets: [][][]uint64{ + {{0, 0, 0}, {50, 25, 25}, {100, 50, 50}, {250, 125, 125}}, + }, + metricMins: [][]float64{ + {0, 5.0, 2.0, 3.0}, + }, + metricMaxes: [][]float64{ + {0, 800.0, 825.0, 800.0}, + }, + isCumulative: []bool{true}, + }, + ), + outMetrics: generateMixedTestMetrics( + testSumMetric{ + metricNames: []string{"metric_1"}, + metricValues: [][]float64{{0, 100, 200, 500}}, + isCumulative: []bool{true}, + isMonotonic: []bool{true}, + }, + testHistogramMetric{ + metricNames: []string{"metric_2"}, + metricCounts: [][]uint64{{100, 100, 300}}, + metricSums: [][]float64{{100, 100, 300}}, + metricBuckets: [][][]uint64{ + {{50, 25, 25}, {50, 25, 25}, {150, 75, 75}}, + }, + metricMins: [][]float64{ + nil, + }, + metricMaxes: [][]float64{ + nil, + }, + isCumulative: []bool{false}, + }), + }, + { + name: "cumulative_to_delta_unsupported_include_metric_type", + include: MatchMetrics{ + MetricTypes: []string{"summary"}, + }, + wantError: errors.New("unsupported metric type filter: summary"), + }, + { + name: "cumulative_to_delta_unsupported_exclude_metric_type", + include: MatchMetrics{ + MetricTypes: []string{"summary"}, + }, + wantError: errors.New("unsupported metric type filter: summary"), + }, } for _, test := range testCases { @@ -453,6 +637,12 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { cfg, next, ) + 
+ if test.wantError != nil { + require.ErrorContains(t, err, test.wantError.Error()) + require.Nil(t, mgp) + return + } assert.NotNil(t, mgp) assert.NoError(t, err) @@ -540,27 +730,7 @@ func generateTestSumMetrics(tm testSumMetric) pmetric.Metrics { rm := md.ResourceMetrics().AppendEmpty() ms := rm.ScopeMetrics().AppendEmpty().Metrics() - for i, name := range tm.metricNames { - m := ms.AppendEmpty() - m.SetName(name) - sum := m.SetEmptySum() - sum.SetIsMonotonic(tm.isMonotonic[i]) - - if tm.isCumulative[i] { - sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - } else { - sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - } - - for index, value := range tm.metricValues[i] { - dp := m.Sum().DataPoints().AppendEmpty() - dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) - dp.SetDoubleValue(value) - if len(tm.flags) > i && len(tm.flags[i]) > index { - dp.SetFlags(tm.flags[i][index]) - } - } - } + tm.addToMetrics(ms, now) return md } @@ -571,44 +741,20 @@ func generateTestHistogramMetrics(tm testHistogramMetric) pmetric.Metrics { rm := md.ResourceMetrics().AppendEmpty() ms := rm.ScopeMetrics().AppendEmpty().Metrics() - for i, name := range tm.metricNames { - m := ms.AppendEmpty() - m.SetName(name) - hist := m.SetEmptyHistogram() + tm.addToMetrics(ms, now) - if tm.isCumulative[i] { - hist.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) - } else { - hist.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) - } + return md +} - for index, count := range tm.metricCounts[i] { - dp := m.Histogram().DataPoints().AppendEmpty() - dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) - dp.SetCount(count) +func generateMixedTestMetrics(tsm testSumMetric, thm testHistogramMetric) pmetric.Metrics { + md := pmetric.NewMetrics() + now := time.Now() - sums := tm.metricSums[i] - if len(sums) > 0 { - dp.SetSum(sums[index]) - } - if tm.metricMins != nil { - mins := 
tm.metricMins[i] - if len(mins) > 0 { - dp.SetMin(mins[index]) - } - } - if tm.metricMaxes != nil { - maxes := tm.metricMaxes[i] - if len(maxes) > 0 { - dp.SetMax(maxes[index]) - } - } - dp.BucketCounts().FromRaw(tm.metricBuckets[i][index]) - if len(tm.flags) > i && len(tm.flags[i]) > index { - dp.SetFlags(tm.flags[i][index]) - } - } - } + rm := md.ResourceMetrics().AppendEmpty() + ms := rm.ScopeMetrics().AppendEmpty().Metrics() + + tsm.addToMetrics(ms, now) + thm.addToMetrics(ms, now) return md } diff --git a/processor/cumulativetodeltaprocessor/testdata/config.yaml b/processor/cumulativetodeltaprocessor/testdata/config.yaml index 31775d239adb..07945488d5c5 100644 --- a/processor/cumulativetodeltaprocessor/testdata/config.yaml +++ b/processor/cumulativetodeltaprocessor/testdata/config.yaml @@ -42,6 +42,49 @@ cumulativetodelta/regexp: - b* max_staleness: 10s +cumulativetodelta/metric_type_filter: + include: + match_type: regexp + metrics: + - a* + metric_types: + - sum + exclude: + match_type: regexp + metrics: + - b* + metric_types: + - histogram + max_staleness: 10s + +cumulativetodelta/invalid_include_metric_type_filter: + include: + match_type: regexp + metrics: + - a* + metric_types: + - gauge + exclude: + match_type: regexp + metrics: + - b* + metric_types: + - histogram + max_staleness: 10s + +cumulativetodelta/invalid_exclude_metric_type_filter: + include: + match_type: regexp + metrics: + - a* + exclude: + match_type: regexp + metrics: + - b* + metric_types: + - Invalid + max_staleness: 10s + cumulativetodelta/auto: initial_value: auto diff --git a/processor/deltatocumulativeprocessor/internal/data/add.go b/processor/deltatocumulativeprocessor/internal/data/add.go index 13b9a8151106..c1a1ee6ad8f7 100644 --- a/processor/deltatocumulativeprocessor/internal/data/add.go +++ b/processor/deltatocumulativeprocessor/internal/data/add.go @@ -80,7 +80,7 @@ func (dp ExpHistogram) Add(in ExpHistogram) ExpHistogram { // Downscale if an expected number of buckets after 
the merge is too large. from := expo.Scale(dp.Scale()) - to := max( + to := min( expo.Limit(maxBuckets, from, dp.Positive(), in.Positive()), expo.Limit(maxBuckets, from, dp.Negative(), in.Negative()), ) diff --git a/processor/deltatocumulativeprocessor/internal/data/expo_test.go b/processor/deltatocumulativeprocessor/internal/data/expo_test.go index 970eda2b67c7..768f1f39b1cf 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo_test.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo_test.go @@ -27,10 +27,11 @@ func TestExpoAdd(t *testing.T) { defer func() { maxBuckets = prevMaxBuckets }() cases := []struct { - name string - dp, in expdp - want expdp - flip bool + name string + dp, in expdp + want expdp + flip bool + alsoTryEachSign bool }{{ name: "noop", dp: expdp{PosNeg: bins{0, 0, 0, 0, 0, 0, 0, 0}.Into(), Count: 0}, @@ -108,6 +109,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: bins{3, 3, 3, 3, 3, 3, 3, 3}.Into(), Count: 24, }, + alsoTryEachSign: true, }, { name: "scale/downscale_once_exceeds_limit", dp: expdp{ @@ -125,6 +127,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{2, 2, 2, 6, 4, 4, 4}, 0), Count: 24, }, + alsoTryEachSign: true, }, { name: "scale/downscale_multiple_times_until_within_limit", dp: expdp{ @@ -142,6 +145,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{2, 4, 2, 4, 8, 4}, -2), Count: 24, }, + alsoTryEachSign: true, }, { name: "scale/ignore_leading_trailing_zeros_in_bucket_count", dp: expdp{ @@ -159,6 +163,7 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{1, 7, 7, 4, 3, 2, 2}, 0), Count: 26, }, + alsoTryEachSign: true, }, { name: "scale/downscale_with_leading_trailing_zeros", dp: expdp{ @@ -176,17 +181,18 @@ func TestExpoAdd(t *testing.T) { PosNeg: rawbs([]uint64{11, 11, 0, 0, 12, 12}, -1), Count: 46, }, + alsoTryEachSign: true, }} for _, cs := range cases { - run := func(dp, in expdp) func(t *testing.T) { + run := func(dp, in, want expdp) func(t *testing.T) { return func(t 
*testing.T) { is := datatest.New(t) var ( dp = ExpHistogram{dp.Into()} in = ExpHistogram{in.Into()} - want = ExpHistogram{cs.want.Into()} + want = ExpHistogram{want.Into()} ) dp.SetTimestamp(0) @@ -199,14 +205,32 @@ func TestExpoAdd(t *testing.T) { } if cs.flip { - t.Run(cs.name+"-dp", run(cs.dp, cs.in)) - t.Run(cs.name+"-in", run(cs.in, cs.dp)) + t.Run(cs.name+"-dp", run(cs.dp, cs.in, cs.want)) + t.Run(cs.name+"-in", run(cs.in, cs.dp, cs.want)) continue } - t.Run(cs.name, run(cs.dp, cs.in)) + if cs.alsoTryEachSign { + t.Run(cs.name+"-pos", run(clonePosExpdp(cs.dp), clonePosExpdp(cs.in), clonePosExpdp(cs.want))) + t.Run(cs.name+"-neg", run(cloneNegExpdp(cs.dp), cloneNegExpdp(cs.in), cloneNegExpdp(cs.want))) + } + t.Run(cs.name, run(cs.dp, cs.in, cs.want)) } } +func cloneNegExpdp(dp expotest.Histogram) expotest.Histogram { + dp.Neg = pmetric.NewExponentialHistogramDataPointBuckets() + dp.PosNeg.CopyTo(dp.Neg) + dp.PosNeg = expo.Buckets{} + return dp +} + +func clonePosExpdp(dp expotest.Histogram) expotest.Histogram { + dp.Pos = pmetric.NewExponentialHistogramDataPointBuckets() + dp.PosNeg.CopyTo(dp.Pos) + dp.PosNeg = expo.Buckets{} + return dp +} + func rawbs(data []uint64, offset int32) expo.Buckets { bs := pmetric.NewExponentialHistogramDataPointBuckets() bs.BucketCounts().FromRaw(data) diff --git a/processor/probabilisticsamplerprocessor/config_test.go b/processor/probabilisticsamplerprocessor/config_test.go index 96c6a2005e4d..4bd350b6eb8e 100644 --- a/processor/probabilisticsamplerprocessor/config_test.go +++ b/processor/probabilisticsamplerprocessor/config_test.go @@ -84,8 +84,6 @@ func TestLoadInvalidConfig(t *testing.T) { factory := NewFactory() factories.Processors[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck _, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", test.file), factories) require.ErrorContains(t, err, test.contains) }) diff --git 
a/processor/sumologicprocessor/config_test.go b/processor/sumologicprocessor/config_test.go index 06e891ea9806..12d276b0be6a 100644 --- a/processor/sumologicprocessor/config_test.go +++ b/processor/sumologicprocessor/config_test.go @@ -21,8 +21,6 @@ func TestLoadConfig(t *testing.T) { factory := NewFactory() factories.Processors[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) require.NoError(t, err) diff --git a/receiver/awss3receiver/s3reader.go b/receiver/awss3receiver/s3reader.go index ddf44c5b9fc1..eeac96f4c600 100644 --- a/receiver/awss3receiver/s3reader.go +++ b/receiver/awss3receiver/s3reader.go @@ -61,7 +61,7 @@ func newS3Reader(ctx context.Context, notifier statusNotifier, logger *zap.Logge }, nil } -//nolint:golint,unparam +//nolint:golint func (s3Reader *s3Reader) readAll(ctx context.Context, telemetryType string, dataCallback s3ReaderDataCallback) error { var timeStep time.Duration if s3Reader.s3Partition == "hour" { diff --git a/receiver/azureblobreceiver/config_test.go b/receiver/azureblobreceiver/config_test.go index 7796d1047bae..1662c5e0deaa 100644 --- a/receiver/azureblobreceiver/config_test.go +++ b/receiver/azureblobreceiver/config_test.go @@ -22,8 +22,6 @@ func TestLoadConfig(t *testing.T) { factory := NewFactory() factories.Receivers[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) require.NoError(t, err) diff --git a/receiver/azureeventhubreceiver/config_test.go b/receiver/azureeventhubreceiver/config_test.go index 2372194ef18c..7c570df0dda3 100644 --- a/receiver/azureeventhubreceiver/config_test.go +++ b/receiver/azureeventhubreceiver/config_test.go @@ -21,8 +21,6 @@ func 
TestLoadConfig(t *testing.T) { factory := NewFactory() factories.Receivers[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) require.NoError(t, err) diff --git a/receiver/datadogreceiver/internal/translator/testutil.go b/receiver/datadogreceiver/internal/translator/testutil.go index 254a509b8178..c09bfe165642 100644 --- a/receiver/datadogreceiver/internal/translator/testutil.go +++ b/receiver/datadogreceiver/internal/translator/testutil.go @@ -33,7 +33,7 @@ func requireResourceAttributes(t *testing.T, attrs, expectedAttrs pcommon.Map) { }) } -// nolint:unparam +//nolint:unparam func requireScopeMetrics(t *testing.T, result pmetric.Metrics, expectedScopeMetricsLen, expectedMetricsLen int) { require.Equal(t, expectedScopeMetricsLen, result.ResourceMetrics().At(0).ScopeMetrics().Len()) require.Equal(t, expectedMetricsLen, result.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len()) diff --git a/receiver/elasticsearchreceiver/client_test.go b/receiver/elasticsearchreceiver/client_test.go index 1705adb6869f..d4660da59b13 100644 --- a/receiver/elasticsearchreceiver/client_test.go +++ b/receiver/elasticsearchreceiver/client_test.go @@ -595,7 +595,7 @@ type mockServer struct { type mockServerOption func(*mockServer) -func withBasicAuth(username, password string) mockServerOption { // nolint:unparam +func withBasicAuth(username, password string) mockServerOption { //nolint:unparam return func(m *mockServer) { m.auth = func(u, p string) bool { return u == username && p == password diff --git a/receiver/envoyalsreceiver/Makefile b/receiver/envoyalsreceiver/Makefile new file mode 100644 index 000000000000..ded7a36092dc --- /dev/null +++ b/receiver/envoyalsreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/receiver/envoyalsreceiver/README.md 
b/receiver/envoyalsreceiver/README.md new file mode 100644 index 000000000000..1ef1fb384c17 --- /dev/null +++ b/receiver/envoyalsreceiver/README.md @@ -0,0 +1,44 @@ +# Envoy ALS(access log service) receiver + + +| Status | | +| ------------- |-----------| +| Stability | [development]: logs | +| Distributions | [] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fenvoyals%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fenvoyals) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fenvoyals%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fenvoyals) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@evan-bradley](https://www.github.com/evan-bradley) | + +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development + + +This is a receiver for the [Envoy gRPC ALS](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/access_loggers/grpc/v3/als.proto#envoy-v3-api-msg-extensions-access-loggers-grpc-v3-httpgrpcaccesslogconfig) sink. + +Envoy ALS (Access Log Service) is a feature of Envoy Proxy that allows for the +centralized collection and management of access logs. + +Instead of writing access logs to local files, Envoy can be configured to send these logs to a remote gRPC service. + +This is particularly useful in distributed systems where centralized logging is required for monitoring, auditing, and debugging purposes. 
+ +[Istio](https://istio.io) and [Envoy Gateway](https://gateway.envoyproxy.io) support OTLP and gRPC ALS with first class API. + +## Getting Started + +The settings are: + +- `endpoint` (required, default = localhost:19001 gRPC protocol): host:port to which the receiver is going to receive data. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. + +Example: +```yaml +receivers: + envoyals: + endpoint: 0.0.0.0:3500 +``` + +## Advanced Configuration + +Other options can be configured to support more advanced use cases: + +- [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) including CORS +- [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) + diff --git a/receiver/envoyalsreceiver/als.go b/receiver/envoyalsreceiver/als.go new file mode 100644 index 000000000000..f9a32e87a3ca --- /dev/null +++ b/receiver/envoyalsreceiver/als.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + "errors" + "fmt" + + alsv3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.uber.org/zap" + "google.golang.org/grpc" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/als" +) + +type alsReceiver struct { + conf *Config + nextConsumer consumer.Logs + settings receiver.Settings + serverGRPC *grpc.Server + 
+ obsrepGRPC *receiverhelper.ObsReport +} + +func (r *alsReceiver) Start(ctx context.Context, host component.Host) error { + var err error + r.serverGRPC, err = r.conf.ToServer(ctx, host, r.settings.TelemetrySettings) + if err != nil { + return fmt.Errorf("failed create grpc server error: %w", err) + } + + alsv3.RegisterAccessLogServiceServer(r.serverGRPC, als.New(r.nextConsumer, r.obsrepGRPC)) + + err = r.startGRPCServer(ctx, host) + if err != nil { + return fmt.Errorf("failed to start grpc server error: %w", err) + } + + return err +} + +func (r *alsReceiver) startGRPCServer(ctx context.Context, host component.Host) error { + r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", r.conf.NetAddr.Endpoint)) + listener, err := r.conf.NetAddr.Listen(ctx) + if err != nil { + return err + } + + go func() { + if errGRPC := r.serverGRPC.Serve(listener); !errors.Is(errGRPC, grpc.ErrServerStopped) && errGRPC != nil { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGRPC)) + } + }() + return nil +} + +func (r *alsReceiver) Shutdown(_ context.Context) error { + if r.serverGRPC != nil { + r.serverGRPC.GracefulStop() + } + + return nil +} + +func newALSReceiver(cfg *Config, nextConsumer consumer.Logs, settings receiver.Settings) (*alsReceiver, error) { + r := &alsReceiver{ + conf: cfg, + nextConsumer: nextConsumer, + settings: settings, + } + + var err error + r.obsrepGRPC, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ + ReceiverID: settings.ID, + Transport: "grpc", + ReceiverCreateSettings: settings, + }) + if err != nil { + return nil, err + } + + return r, nil +} diff --git a/receiver/envoyalsreceiver/als_test.go b/receiver/envoyalsreceiver/als_test.go new file mode 100644 index 000000000000..ef11a6e2233c --- /dev/null +++ b/receiver/envoyalsreceiver/als_test.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + "testing" + "time" + + corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + alsdata "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" + alsv3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/receiver/receivertest" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/plogtest" +) + +func startGRPCServer(t *testing.T) (*grpc.ClientConn, *consumertest.LogsSink) { + config := &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: testutil.GetAvailableLocalAddress(t), + Transport: confignet.TransportTypeTCP, + }, + }, + } + sink := new(consumertest.LogsSink) + + set := receivertest.NewNopSettings() + lr, err := newALSReceiver(config, sink, set) + require.NoError(t, err) + + require.NoError(t, lr.Start(context.Background(), componenttest.NewNopHost())) + t.Cleanup(func() { require.NoError(t, lr.Shutdown(context.Background())) }) + + conn, err := grpc.NewClient(config.NetAddr.Endpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + return conn, sink +} + +func TestLogs(t *testing.T) { + // Start grpc server + conn, sink := startGRPCServer(t) + defer func() { + _ = 
conn.Close() + }() + + client, err := alsv3.NewAccessLogServiceClient(conn).StreamAccessLogs(context.Background()) + require.NoError(t, err) + + tm, err := time.Parse(time.RFC3339Nano, "2020-07-30T01:01:01.123456789Z") + require.NoError(t, err) + ts := int64(pcommon.NewTimestampFromTime(tm)) + + identifier := &alsv3.StreamAccessLogsMessage_Identifier{ + Node: &corev3.Node{ + Id: "test-id", + Cluster: "test-cluster", + }, + LogName: "test-log-name", + } + + httpLog := &alsdata.HTTPAccessLogEntry{ + CommonProperties: &alsdata.AccessLogCommon{ + StartTime: timestamppb.New(tm), + }, + Request: &alsdata.HTTPRequestProperties{ + Path: "/test", + Authority: "example.com", + }, + Response: &alsdata.HTTPResponseProperties{ + ResponseCode: wrapperspb.UInt32(200), + }, + } + + tcpLog := &alsdata.TCPAccessLogEntry{ + CommonProperties: &alsdata.AccessLogCommon{ + StartTime: timestamppb.New(tm), + }, + ConnectionProperties: &alsdata.ConnectionProperties{ + ReceivedBytes: 10, + SentBytes: 20, + }, + } + + tests := []struct { + name string + message *alsv3.StreamAccessLogsMessage + expected plog.Logs + }{ + { + name: "http", + message: &alsv3.StreamAccessLogsMessage{ + Identifier: identifier, + LogEntries: &alsv3.StreamAccessLogsMessage_HttpLogs{ + HttpLogs: &alsv3.StreamAccessLogsMessage_HTTPAccessLogEntries{ + LogEntry: []*alsdata.HTTPAccessLogEntry{ + httpLog, + }, + }, + }, + }, + expected: generateLogs([]Log{ + { + Timestamp: ts, + Attributes: map[string]any{ + "api_version": "v3", + "log_type": "http", + }, + Body: pcommon.NewValueStr(httpLog.String()), + }, + }), + }, + { + name: "tcp", + message: &alsv3.StreamAccessLogsMessage{ + Identifier: identifier, + LogEntries: &alsv3.StreamAccessLogsMessage_TcpLogs{ + TcpLogs: &alsv3.StreamAccessLogsMessage_TCPAccessLogEntries{ + LogEntry: []*alsdata.TCPAccessLogEntry{ + tcpLog, + }, + }, + }, + }, + expected: generateLogs([]Log{ + { + Timestamp: ts, + Attributes: map[string]any{ + "api_version": "v3", + "log_type": "tcp", + }, + 
Body: pcommon.NewValueStr(tcpLog.String()), + }, + }), + }, + } + + for i, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err = client.Send(tt.message) + require.NoError(t, err, "should not have failed to post logs") + + require.Eventually(t, func() bool { + gotLogs := sink.AllLogs() + + err := plogtest.CompareLogs(tt.expected, gotLogs[i], plogtest.IgnoreObservedTimestamp()) + if err == nil { + return true + } + t.Logf("Logs not received yet: %v", err) + return false + }, 5*time.Second, 100*time.Millisecond) + }) + } +} + +type Log struct { + Timestamp int64 + Body pcommon.Value + Attributes map[string]any +} + +func generateLogs(logs []Log) plog.Logs { + ld := plog.NewLogs() + logSlice := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords() + + for _, log := range logs { + lr := logSlice.AppendEmpty() + _ = lr.Attributes().FromRaw(log.Attributes) + lr.SetTimestamp(pcommon.Timestamp(log.Timestamp)) + lr.Body().SetStr(log.Body.AsString()) + } + return ld +} diff --git a/receiver/envoyalsreceiver/config.go b/receiver/envoyalsreceiver/config.go new file mode 100644 index 000000000000..57a183022723 --- /dev/null +++ b/receiver/envoyalsreceiver/config.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" +) + +type Config struct { + configgrpc.ServerConfig `mapstructure:",squash"` +} + +var _ component.Config = (*Config)(nil) diff --git a/receiver/envoyalsreceiver/config_test.go b/receiver/envoyalsreceiver/config_test.go new file mode 100644 index 000000000000..c14d9d145d7e --- /dev/null +++ b/receiver/envoyalsreceiver/config_test.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/confmap/confmaptest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/metadata" +) + +func TestLoadConfig(t *testing.T) { + t.Parallel() + + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + + tests := []struct { + id component.ID + expected component.Config + }{ + { + id: component.NewIDWithName(metadata.Type, "defaults"), + expected: &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: "localhost:19001", + Transport: confignet.TransportTypeTCP, + }, + }, + }, + }, + { + id: component.NewIDWithName(metadata.Type, "custom"), + expected: &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: "localhost:4600", + Transport: confignet.TransportTypeTCP, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.id.String(), func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + sub, err := cm.Sub(tt.id.String()) + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(cfg)) + + assert.NoError(t, component.ValidateConfig(cfg)) + assert.Equal(t, tt.expected, cfg) + }) + } +} diff --git a/receiver/envoyalsreceiver/doc.go b/receiver/envoyalsreceiver/doc.go new file mode 100644 index 000000000000..0b647b53c155 --- /dev/null +++ b/receiver/envoyalsreceiver/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package envoyalsreceiver // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" diff --git a/receiver/envoyalsreceiver/factory.go b/receiver/envoyalsreceiver/factory.go new file mode 100644 index 000000000000..bfbb2cee4a1f --- /dev/null +++ b/receiver/envoyalsreceiver/factory.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/metadata" +) + +const ( + defaultGRPCEndpoint = "localhost:19001" +) + +// NewFactory creates a new ALS receiver factory. +func NewFactory() receiver.Factory { + return receiver.NewFactory( + metadata.Type, + createDefaultConfig, + receiver.WithLogs(newReceiver, metadata.LogsStability)) +} + +func createDefaultConfig() component.Config { + return &Config{ + ServerConfig: configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: defaultGRPCEndpoint, + Transport: confignet.TransportTypeTCP, + }, + }, + } +} + +func newReceiver( + _ context.Context, + st receiver.Settings, + cfg component.Config, + consumer consumer.Logs, +) (receiver.Logs, error) { + alsCfg := cfg.(*Config) + return newALSReceiver(alsCfg, consumer, st) +} diff --git a/receiver/envoyalsreceiver/factory_test.go b/receiver/envoyalsreceiver/factory_test.go new file mode 100644 index 000000000000..7ba5941f65b8 --- /dev/null +++ b/receiver/envoyalsreceiver/factory_test.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envoyalsreceiver // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confignet" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestCreateDefaultConfig(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + require.NotNil(t, cfg, "failed to create default config") + require.NoError(t, componenttest.CheckConfigStruct(cfg)) +} + +func TestCreateReceiver(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + cfg.(*Config).ServerConfig = configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: defaultGRPCEndpoint, + Transport: confignet.TransportTypeTCP, + }, + } + set := receivertest.NewNopSettings() + receiver, err := factory.CreateLogs(context.Background(), set, cfg, consumertest.NewNop()) + require.NoError(t, err, "receiver creation failed") + require.NotNil(t, receiver, "receiver creation failed") +} diff --git a/receiver/envoyalsreceiver/generated_component_test.go b/receiver/envoyalsreceiver/generated_component_test.go new file mode 100644 index 000000000000..35150d9f1ce0 --- /dev/null +++ b/receiver/envoyalsreceiver/generated_component_test.go @@ -0,0 +1,69 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package envoyalsreceiver + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receivertest" +) + +func TestComponentFactoryType(t *testing.T) { + require.Equal(t, "envoyals", NewFactory().Type().String()) +} + +func TestComponentConfigStruct(t *testing.T) { + require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) +} + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + name string + createFn func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) + }{ + + { + name: "logs", + createFn: func(ctx context.Context, set receiver.Settings, cfg component.Config) (component.Component, error) { + return factory.CreateLogs(ctx, set, cfg, consumertest.NewNop()) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(&cfg)) + + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + host := componenttest.NewNopHost() + require.NoError(t, err) + require.NoError(t, firstRcvr.Start(context.Background(), host)) + require.NoError(t, firstRcvr.Shutdown(context.Background())) + secondRcvr, err := 
tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + require.NoError(t, err) + require.NoError(t, secondRcvr.Start(context.Background(), host)) + require.NoError(t, secondRcvr.Shutdown(context.Background())) + }) + } +} diff --git a/receiver/envoyalsreceiver/generated_package_test.go b/receiver/envoyalsreceiver/generated_package_test.go new file mode 100644 index 000000000000..2e965ab81961 --- /dev/null +++ b/receiver/envoyalsreceiver/generated_package_test.go @@ -0,0 +1,13 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package envoyalsreceiver + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/receiver/envoyalsreceiver/go.mod b/receiver/envoyalsreceiver/go.mod new file mode 100644 index 000000000000..6030ed07c479 --- /dev/null +++ b/receiver/envoyalsreceiver/go.mod @@ -0,0 +1,88 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver + +go 1.22.0 + +require ( + github.com/envoyproxy/go-control-plane v0.13.1 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.117.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.117.0 + github.com/stretchr/testify v1.10.0 + go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/component/componentstatus v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3 + 
go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3 + go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3 + go.uber.org/goleak v1.3.0 + go.uber.org/zap v1.27.0 + google.golang.org/grpc v1.69.4 + google.golang.org/protobuf v1.36.3 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.2 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/go-grpc-compression v1.2.3 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configauth 
v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text v0.21.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => 
../../pkg/pdatatest + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil diff --git a/receiver/envoyalsreceiver/go.sum b/receiver/envoyalsreceiver/go.sum new file mode 100644 index 000000000000..b889a68270a8 --- /dev/null +++ b/receiver/envoyalsreceiver/go.sum @@ -0,0 +1,188 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress 
v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= +github.com/mostynb/go-grpc-compression v1.2.3/go.mod 
h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3 h1:MxzfNtItYodclGVQDLzdyBaKixbqEKC2sPGxTiY0uEE= +go.opentelemetry.io/collector/client v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:I5195HMWPseUSVEbNaEgMbz8rzx11T59I2YIkJQ2jrE= +go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ODfDW9siyGYEvEv1+oKf0abnpYbIsMwAlXuZMCUFPXw= +go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:/fqrkzmOXsqm4boZaVtxi5YIz39/i8K8Wqd9oryz8Iw= +go.opentelemetry.io/collector/component/componentstatus v0.118.1-0.20250121185328-fbefb22cc2b3 
h1:anyK0wvAeTO3QpO2AvVGXaN7t9K/CWQXSHii+0Ygr8o= +go.opentelemetry.io/collector/component/componentstatus v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:V84x0j/NyHPJciFJ5R8DrJrTOgkYFkyXTh7TXQYvol4= +go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ZnCUlmJ6ZqG+pL1fYrEXmg2FG+RxiSay5Fyxa0i79dY= +go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:eug78n4rxt5hdCSDWZ50wpYZXAl0ho/w6IsNtVZzQII= +go.opentelemetry.io/collector/config/configauth v0.118.1-0.20250121185328-fbefb22cc2b3 h1:FrH9pOMBYyhYnMCeINzeeWeT/RdcUHUnpGWooak4apM= +go.opentelemetry.io/collector/config/configauth v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:4w14UrrB+We1k+gt3/3+34SWKLKQdGDPQ/lpsL0tiHc= +go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3 h1:dJzzLwFqU/j3VHoaJetgUlPOzrZPtg9zUGhKVsM9WUo= +go.opentelemetry.io/collector/config/configcompression v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3 h1:tptVdunGC+0y1KmEYvmgmLRR8Jam4y1KtfYRVoyLw5U= +go.opentelemetry.io/collector/config/configgrpc v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:R//tIJknJigDNZhuDmKiUpPrgCZ79HPKVdq0Jub3fkw= +go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3 h1:z2wSQoQlbMfqEguwKl2NFqD3dhT9wIeRENZmadadvmg= +go.opentelemetry.io/collector/config/confignet v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3 h1:Oi9hXd7YIf3wa4F9SXeKwYyOkB+DRhfZgHjs44Z6jyQ= +go.opentelemetry.io/collector/config/configopaque v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3 
h1:AOaJFxyz+7Zlh2AbZd7vu2gYA5a4rSItbwAS7GYAaO4= +go.opentelemetry.io/collector/config/configtelemetry v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3 h1:zeC8GoDbDxtUbEvp8sPCXONuMxqWQPowXEzUZySxSgA= +go.opentelemetry.io/collector/config/configtls v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3 h1:bYJCjMGjEi0hFpVsdkg20ri5ZGhG7VfrlPjdW7FhclI= +go.opentelemetry.io/collector/confmap v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3 h1:rMGS7YpPjLWbykAQNoBZhTZ8OONKSmnewCFggZXMPmg= +go.opentelemetry.io/collector/consumer v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:YyTWeyBUYlVi983ylJAY5qHnCajq67on3A59OpS6A/I= +go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3 h1:wVb72DufdN0fQoScGeK7ByM5GTf0BkdTA4ZtKOQg+RI= +go.opentelemetry.io/collector/consumer/consumererror v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:/fhqEIxH0hmnDa6zm38XzsdURr5GrlC9oKO70JVorHU= +go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:sQKFJz7EYn9e9KsgVNjnLsONuc4w3uUo2+YzM8C2jtE= +go.opentelemetry.io/collector/consumer/consumertest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:fOVRcSFNghbaDpTJtTVHvFEQHeAAW8WEX0dYWbPpgBc= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3 h1:HCyq06lz8dtWHhcKCd5BuhZBu6USgjBEuHyYhBuiw54= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:Ij9o9d7hZb4be6ql6yqMR7xy5fcFR0SSD6RRIYWlu88= +go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3 h1:pigm8Nxub1OMInnkdu9U/Gqm0GuWmYgVUiRa0WuJmo0= 
+go.opentelemetry.io/collector/extension v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:7yUjnhGc/ota8nhFdLdP3trrYFx3jqtq7NAV+i04eJw= +go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3 h1:ENw3837wlS/3iSu0BIyUNjDIQAstkdBiTaCixj6yzrA= +go.opentelemetry.io/collector/extension/auth v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:cs4Er00Asntjw7aPHRVQDvvtMzppKjRgMECa89b86AE= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3 h1:bTrqWcaRulXfpSQwnWrGlCsN4ZO5wzD931vH2E28Vc4= +go.opentelemetry.io/collector/featuregate v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3 h1:GXjNTD7hyz2Qwuu5uwLYeJTkWECWL6eL41w/JrQIJrU= +go.opentelemetry.io/collector/pdata v1.24.1-0.20250121185328-fbefb22cc2b3/go.mod h1:6lE9r5x41Z9GyvTSBetXSHRikhiZZK5ApmFtX35ZbXc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3 h1:INViss+PcyyzYe/ZFHHFr/h+Mmo7n94nSzdmp68gBqI= +go.opentelemetry.io/collector/pdata/pprofile v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:iD66/nCk+xHh4q/1FBcYBQTEZKZuejggZBkm14/cobA= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3 h1:uXvVXIkbxeEJa9L+xM7b5+2Y/LjfGKX65fQdRfW5+PQ= +go.opentelemetry.io/collector/pipeline v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3 
h1:uP/22oV69zYMWFdeCQHlSpVC22UZWmZsHgcdFDW89eo= +go.opentelemetry.io/collector/receiver v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:ycM9v5g4GvMspTtQbwLvmLOv4djo/bVw4RefJreGGaY= +go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3 h1:i9gXuyWdAXD+NVaGJbPnY4q+u5RwkOb/NSBnv1+IAMw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:x9N91YI3onF0+enjYegcHYOb50Of2xO05c8EyE/baJ0= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3 h1:lSOxA/PFNKwCCf0bYwOkTtvYn4Ch4QADFVJU/kuye08= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.1-0.20250121185328-fbefb22cc2b3/go.mod h1:WLPXXIuodY7quBgqCz3OIsPNdBMLDej5nUIbiyyfoUc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= 
+google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/receiver/envoyalsreceiver/internal/als/server.go b/receiver/envoyalsreceiver/internal/als/server.go new file mode 100644 index 000000000000..7928bd6bf5c7 --- /dev/null +++ b/receiver/envoyalsreceiver/internal/als/server.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package als // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver/internal/als" + +import ( + "context" + "errors" + "io" + + alsv3 "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/receiver/receiverhelper" +) + +const ( + apiVersionAttr = "api_version" + apiVersionVal = "v3" + logTypeAttr = "log_type" + httpTypeVal = "http" + tcpTypeVal = "tcp" +) + +type Server struct { + nextConsumer consumer.Logs + obsrep *receiverhelper.ObsReport +} + +func New(nextConsumer consumer.Logs, obsrep *receiverhelper.ObsReport) *Server { + return &Server{ + nextConsumer: nextConsumer, + obsrep: obsrep, + } +} + +func (s *Server) StreamAccessLogs(logStream alsv3.AccessLogService_StreamAccessLogsServer) error { + for { + data, err := logStream.Recv() + if errors.Is(err, io.EOF) { + break + } + if 
err != nil { + return err + } + + ctx := s.obsrep.StartLogsOp(context.Background()) + logs := toLogs(data) + logRecordCount := logs.LogRecordCount() + err = s.nextConsumer.ConsumeLogs(ctx, logs) + s.obsrep.EndLogsOp(ctx, "protobuf", logRecordCount, err) + if err != nil { + return err + } + } + + return nil +} + +func toLogs(data *alsv3.StreamAccessLogsMessage) plog.Logs { + logs := plog.NewLogs() + + rls := logs.ResourceLogs().AppendEmpty() + logSlice := rls.ScopeLogs().AppendEmpty().LogRecords() + + httpLogs := data.GetHttpLogs() + if httpLogs != nil { + for _, httpLog := range httpLogs.LogEntry { + lr := logSlice.AppendEmpty() + lr.SetTimestamp(pcommon.NewTimestampFromTime(httpLog.CommonProperties.StartTime.AsTime())) + lr.Attributes().PutStr(apiVersionAttr, apiVersionVal) + lr.Attributes().PutStr(logTypeAttr, httpTypeVal) + lr.Body().SetStr(httpLog.String()) + } + } + + tcpLogs := data.GetTcpLogs() + if tcpLogs != nil { + for _, tcpLog := range tcpLogs.LogEntry { + lr := logSlice.AppendEmpty() + lr.SetTimestamp(pcommon.NewTimestampFromTime(tcpLog.CommonProperties.StartTime.AsTime())) + lr.Attributes().PutStr(apiVersionAttr, apiVersionVal) + lr.Attributes().PutStr(logTypeAttr, tcpTypeVal) + lr.Body().SetStr(tcpLog.String()) + } + } + return logs +} diff --git a/receiver/envoyalsreceiver/internal/metadata/generated_status.go b/receiver/envoyalsreceiver/internal/metadata/generated_status.go new file mode 100644 index 000000000000..68a29876bc76 --- /dev/null +++ b/receiver/envoyalsreceiver/internal/metadata/generated_status.go @@ -0,0 +1,16 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("envoyals") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver" +) + +const ( + LogsStability = component.StabilityLevelDevelopment +) diff --git a/receiver/envoyalsreceiver/metadata.yaml b/receiver/envoyalsreceiver/metadata.yaml new file mode 100644 index 000000000000..cb9d5cc1a140 --- /dev/null +++ b/receiver/envoyalsreceiver/metadata.yaml @@ -0,0 +1,9 @@ +type: envoyals + +status: + class: receiver + stability: + development: [logs] + distributions: [] + codeowners: + active: [evan-bradley] diff --git a/receiver/envoyalsreceiver/testdata/config.yaml b/receiver/envoyalsreceiver/testdata/config.yaml new file mode 100644 index 000000000000..4883c58986ae --- /dev/null +++ b/receiver/envoyalsreceiver/testdata/config.yaml @@ -0,0 +1,5 @@ +# The following demonstrates how to enable protocols with defaults. +envoyals/defaults: + +envoyals/custom: + endpoint: localhost:4600 diff --git a/receiver/filestatsreceiver/filestats_linux.go b/receiver/filestatsreceiver/filestats_linux.go index c3113bad2a9c..1a57e73bd39f 100644 --- a/receiver/filestatsreceiver/filestats_linux.go +++ b/receiver/filestatsreceiver/filestats_linux.go @@ -19,8 +19,8 @@ func collectStats(now pcommon.Timestamp, fileinfo os.FileInfo, metricsBuilder *m stat := fileinfo.Sys().(*syscall.Stat_t) atime := stat.Atim.Sec ctime := stat.Ctim.Sec - //nolint + //nolint:unconvert metricsBuilder.RecordFileAtimeDataPoint(now, int64(atime)) - //nolint + //nolint:unconvert metricsBuilder.RecordFileCtimeDataPoint(now, int64(ctime), fileinfo.Mode().Perm().String()) } diff --git a/receiver/githubreceiver/config_test.go b/receiver/githubreceiver/config_test.go index 52c5cfd75654..c96b0e8dec13 100644 --- a/receiver/githubreceiver/config_test.go +++ b/receiver/githubreceiver/config_test.go @@ -27,9 +27,6 @@ func TestLoadConfig(t *testing.T) { factory := 
NewFactory() factories.Receivers[metadata.Type] = factory - - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config.yaml"), factories) require.NoError(t, err) @@ -93,8 +90,6 @@ func TestLoadInvalidConfig_NoScrapers(t *testing.T) { factory := NewFactory() factories.Receivers[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck _, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-noscrapers.yaml"), factories) require.ErrorContains(t, err, "must specify at least one scraper") @@ -106,8 +101,6 @@ func TestLoadInvalidConfig_InvalidScraperKey(t *testing.T) { factory := NewFactory() factories.Receivers[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck _, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-invalidscraperkey.yaml"), factories) require.ErrorContains(t, err, "error reading configuration for \"github\": invalid scraper key: \"invalidscraperkey\"") diff --git a/receiver/googlecloudpubsubreceiver/README.md b/receiver/googlecloudpubsubreceiver/README.md index bb8edb3b4dcd..21b4a419b650 100644 --- a/receiver/googlecloudpubsubreceiver/README.md +++ b/receiver/googlecloudpubsubreceiver/README.md @@ -41,29 +41,53 @@ receivers: ## Encoding -You should not need to set the encoding of the subscription as the receiver will try to discover the type of the data -by looking at the `ce-type` and `content-type` attributes of the message. Only when those attributes are not set -must the `encoding` field in the configuration be set. +The `encoding` options allows you to specify Encoding Extensions for decoding messages on the subscription. 
An
+extension needs to be configured in the `extensions` section, and added to the pipeline in the collector's configuration file.

-| ce-type                           | ce-datacontenttype   | encoding          | description                                    |
-|-----------------------------------|----------------------|-------------------|------------------------------------------------|
-| org.opentelemetry.otlp.traces.v1  | application/protobuf |                   | Decode OTLP trace message                      |
-| org.opentelemetry.otlp.metrics.v1 | application/protobuf |                   | Decode OTLP metric message                     |
-| org.opentelemetry.otlp.logs.v1    | application/json     |                   | Decode OTLP log message                        |
-| -                                 | -                    | otlp_proto_trace  | Decode OTLP trace message                      |
-| -                                 | -                    | otlp_proto_metric | Decode OTLP trace message                      |
-| -                                 | -                    | otlp_proto_log    | Decode OTLP trace message                      |
-| -                                 | -                    | cloud_logging     | Decode [Cloud Logging] [LogEntry] message type |
-| -                                 | -                    | raw_text          | Wrap in an OTLP log message                    |
+The following example shows how to use the text encoding extension for ingesting arbitrary text messages on a
+subscription, wrapping them in OTLP Log messages. Note that not all extensions support all signals.
+
+```yaml
+extensions:
+  text_encoding:
+    encoding: utf8
+    unmarshaling_separator: "\r?\n"
+
+service:
+  extensions: [text_encoding]
+  pipelines:
+    logs:
+      receivers: [googlecloudpubsub]
+      processors: []
+      exporters: [debug]
+```

-When the `encoding` configuration is set, the attributes on the message are ignored.
+The receiver also supports built-in encodings for the native OTLP encodings, without the need to specify an Encoding
+Extension. The non-OTLP built-in encodings will be deprecated as soon as extensions for the formats are available.
+
+| encoding          | description                                    |
+|-------------------|------------------------------------------------|
+| otlp_proto_trace  | Decode OTLP trace message                      |
+| otlp_proto_metric | Decode OTLP metric message                     |
+| otlp_proto_log    | Decode OTLP log message                        |
+| cloud_logging     | Decode [Cloud Logging] [LogEntry] message type |
+| raw_text          | Wrap in an OTLP log message                    |
 
 With `cloud_logging`, the receiver can be used to bring Cloud Logging messages into an OpenTelemetry pipeline. You'll
 first need to [set up a logging sink][sink-docs] with a Pub/Sub topic as its destination. Note that the
 `cloud_logging` integration is considered **alpha** as the semantic convention on some of the conversion are not
 stabilized yet.
 
 With `raw_text`, the receiver can be used for ingesting arbitrary text message on a Pubsub subscription, wrapping them
-in OTLP Log messages, making it a convenient way to ingest raw log lines from Pubsub.
+in OTLP Log messages.
+
+When no encoding is specified, the receiver will try to discover the type of the data by looking at the `ce-type` and
+`content-type` attributes of the message. These message attributes are set by the `googlepubsubexporter`.
+ +| ce-type | ce-datacontenttype | encoding | description | +|-----------------------------------|----------------------|-------------------|------------------------------------------------| +| org.opentelemetry.otlp.traces.v1 | application/protobuf | | Decode OTLP trace message | +| org.opentelemetry.otlp.metrics.v1 | application/protobuf | | Decode OTLP metric message | +| org.opentelemetry.otlp.logs.v1 | application/protobuf | | Decode OTLP log message | [Cloud Logging]: https://cloud.google.com/logging [LogEntry]: https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry diff --git a/receiver/googlecloudpubsubreceiver/config.go b/receiver/googlecloudpubsubreceiver/config.go index 8dbdb8b9a3e7..d167a29dd63b 100644 --- a/receiver/googlecloudpubsubreceiver/config.go +++ b/receiver/googlecloudpubsubreceiver/config.go @@ -35,51 +35,6 @@ type Config struct { ClientID string `mapstructure:"client_id"` } -func (config *Config) validateForLog() error { - err := config.validate() - if err != nil { - return err - } - switch config.Encoding { - case "": - case "otlp_proto_log": - case "raw_text": - case "raw_json": - case "cloud_logging": - default: - return fmt.Errorf("log encoding %v is not supported. supported encoding formats include [otlp_proto_log,raw_text,raw_json,cloud_logging]", config.Encoding) - } - return nil -} - -func (config *Config) validateForTrace() error { - err := config.validate() - if err != nil { - return err - } - switch config.Encoding { - case "": - case "otlp_proto_trace": - default: - return fmt.Errorf("trace encoding %v is not supported. supported encoding formats include [otlp_proto_trace]", config.Encoding) - } - return nil -} - -func (config *Config) validateForMetric() error { - err := config.validate() - if err != nil { - return err - } - switch config.Encoding { - case "": - case "otlp_proto_metric": - default: - return fmt.Errorf("metric encoding %v is not supported. 
supported encoding formats include [otlp_proto_metric]", config.Encoding) - } - return nil -} - func (config *Config) validate() error { if !subscriptionMatcher.MatchString(config.Subscription) { return fmt.Errorf("subscription '%s' is not a valid format, use 'projects//subscriptions/'", config.Subscription) diff --git a/receiver/googlecloudpubsubreceiver/config_test.go b/receiver/googlecloudpubsubreceiver/config_test.go index 7dfb798ab6be..6b86acc14bb4 100644 --- a/receiver/googlecloudpubsubreceiver/config_test.go +++ b/receiver/googlecloudpubsubreceiver/config_test.go @@ -63,9 +63,6 @@ func TestLoadConfig(t *testing.T) { func TestConfigValidation(t *testing.T) { factory := NewFactory() c := factory.CreateDefaultConfig().(*Config) - assert.Error(t, c.validateForTrace()) - assert.Error(t, c.validateForLog()) - assert.Error(t, c.validateForMetric()) c.Subscription = "projects/000project/subscriptions/my-subscription" assert.Error(t, c.validate()) c.Subscription = "projects/my-project/topics/my-topic" @@ -73,60 +70,3 @@ func TestConfigValidation(t *testing.T) { c.Subscription = "projects/my-project/subscriptions/my-subscription" assert.NoError(t, c.validate()) } - -func TestTraceConfigValidation(t *testing.T) { - factory := NewFactory() - c := factory.CreateDefaultConfig().(*Config) - c.Subscription = "projects/my-project/subscriptions/my-subscription" - assert.NoError(t, c.validateForTrace()) - - c.Encoding = "otlp_proto_metric" - assert.Error(t, c.validateForTrace()) - c.Encoding = "otlp_proto_log" - assert.Error(t, c.validateForTrace()) - c.Encoding = "raw_text" - assert.Error(t, c.validateForTrace()) - c.Encoding = "raw_json" - assert.Error(t, c.validateForTrace()) - - c.Encoding = "otlp_proto_trace" - assert.NoError(t, c.validateForTrace()) -} - -func TestMetricConfigValidation(t *testing.T) { - factory := NewFactory() - c := factory.CreateDefaultConfig().(*Config) - c.Subscription = "projects/my-project/subscriptions/my-subscription" - assert.NoError(t, 
c.validateForMetric()) - - c.Encoding = "otlp_proto_trace" - assert.Error(t, c.validateForMetric()) - c.Encoding = "otlp_proto_log" - assert.Error(t, c.validateForMetric()) - c.Encoding = "raw_text" - assert.Error(t, c.validateForMetric()) - c.Encoding = "raw_json" - assert.Error(t, c.validateForMetric()) - - c.Encoding = "otlp_proto_metric" - assert.NoError(t, c.validateForMetric()) -} - -func TestLogConfigValidation(t *testing.T) { - factory := NewFactory() - c := factory.CreateDefaultConfig().(*Config) - c.Subscription = "projects/my-project/subscriptions/my-subscription" - assert.NoError(t, c.validateForLog()) - - c.Encoding = "otlp_proto_trace" - assert.Error(t, c.validateForLog()) - c.Encoding = "otlp_proto_metric" - assert.Error(t, c.validateForLog()) - - c.Encoding = "raw_text" - assert.NoError(t, c.validateForLog()) - c.Encoding = "raw_json" - assert.NoError(t, c.validateForLog()) - c.Encoding = "otlp_proto_log" - assert.NoError(t, c.validateForLog()) -} diff --git a/receiver/googlecloudpubsubreceiver/factory.go b/receiver/googlecloudpubsubreceiver/factory.go index 96ccc49d5814..802718a55fb4 100644 --- a/receiver/googlecloudpubsubreceiver/factory.go +++ b/receiver/googlecloudpubsubreceiver/factory.go @@ -71,7 +71,7 @@ func (factory *pubsubReceiverFactory) CreateTraces( cfg component.Config, consumer consumer.Traces, ) (receiver.Traces, error) { - err := cfg.(*Config).validateForTrace() + err := cfg.(*Config).validate() if err != nil { return nil, err } @@ -89,7 +89,7 @@ func (factory *pubsubReceiverFactory) CreateMetrics( cfg component.Config, consumer consumer.Metrics, ) (receiver.Metrics, error) { - err := cfg.(*Config).validateForMetric() + err := cfg.(*Config).validate() if err != nil { return nil, err } @@ -107,7 +107,7 @@ func (factory *pubsubReceiverFactory) CreateLogs( cfg component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { - err := cfg.(*Config).validateForLog() + err := cfg.(*Config).validate() if err != nil { return nil, err } 
diff --git a/receiver/googlecloudpubsubreceiver/go.mod b/receiver/googlecloudpubsubreceiver/go.mod index f0a8fd4c2845..ce68b9893147 100644 --- a/receiver/googlecloudpubsubreceiver/go.mod +++ b/receiver/googlecloudpubsubreceiver/go.mod @@ -9,6 +9,7 @@ require ( github.com/googleapis/gax-go/v2 v2.14.1 github.com/iancoleman/strcase v0.3.0 github.com/json-iterator/go v1.1.12 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding v0.114.0 github.com/stretchr/testify v1.10.0 go.opentelemetry.io/collector/component v0.118.1-0.20250121185328-fbefb22cc2b3 go.opentelemetry.io/collector/component/componenttest v0.118.1-0.20250121185328-fbefb22cc2b3 @@ -37,7 +38,7 @@ require ( cloud.google.com/go/iam v1.2.2 // indirect cloud.google.com/go/longrunning v0.6.2 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -55,7 +56,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect go.einride.tech/aip v0.68.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect @@ -92,3 +93,5 @@ retract ( v0.76.1 v0.65.0 ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding => ../../extension/encoding diff --git a/receiver/googlecloudpubsubreceiver/go.sum b/receiver/googlecloudpubsubreceiver/go.sum index 63a63dac35fe..838a13d37be6 100644 --- a/receiver/googlecloudpubsubreceiver/go.sum +++ b/receiver/googlecloudpubsubreceiver/go.sum @@ -22,8 +22,9 @@ 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -100,8 +101,9 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= diff --git a/receiver/googlecloudpubsubreceiver/internal/log_entry.go b/receiver/googlecloudpubsubreceiver/internal/log_entry.go index 282ed890e6f3..8ebc3f2fd9a3 100644 --- a/receiver/googlecloudpubsubreceiver/internal/log_entry.go +++ b/receiver/googlecloudpubsubreceiver/internal/log_entry.go @@ -5,7 +5,6 @@ package internal // import "github.com/open-telemetry/opentelemetry-collector-co import ( "bytes" - "context" "encoding/hex" stdjson "encoding/json" "errors" @@ -20,7 +19,6 @@ import ( jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" - "go.uber.org/zap" "google.golang.org/genproto/googleapis/api/monitoredres" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/reflect/protoreflect" @@ -113,7 +111,7 @@ func getLogEntryDescriptor() protoreflect.MessageDescriptor { // schema; this ensures that a numeric value in the input is correctly // translated to either an integer or a double in the output. It falls back to // plain JSON decoding if payload type is not available in the proto registry. 
-func TranslateLogEntry(_ context.Context, _ *zap.Logger, data []byte) (pcommon.Resource, plog.LogRecord, error) { +func TranslateLogEntry(data []byte) (pcommon.Resource, plog.LogRecord, error) { lr := plog.NewLogRecord() res := pcommon.NewResource() diff --git a/receiver/googlecloudpubsubreceiver/internal/log_entry_test.go b/receiver/googlecloudpubsubreceiver/internal/log_entry_test.go index a5b959b06013..5eef975189f0 100644 --- a/receiver/googlecloudpubsubreceiver/internal/log_entry_test.go +++ b/receiver/googlecloudpubsubreceiver/internal/log_entry_test.go @@ -4,7 +4,6 @@ package internal import ( - "context" "fmt" "testing" "time" @@ -15,7 +14,6 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/multierr" - "go.uber.org/zap" ) type Log struct { @@ -70,15 +68,12 @@ func TestTranslateLogEntry(t *testing.T) { }{ // TODO: Add publicly shareable log test data. } - - logger, _ := zap.NewDevelopment() - for _, tt := range tests { var errs error wantRes, wantLr, err := generateLog(t, tt.want) errs = multierr.Append(errs, err) - gotRes, gotLr, err := TranslateLogEntry(context.TODO(), logger, []byte(tt.input)) + gotRes, gotLr, err := TranslateLogEntry([]byte(tt.input)) errs = multierr.Append(errs, err) errs = multierr.Combine(errs, compareResources(wantRes, gotRes), compareLogRecords(wantLr, gotLr)) diff --git a/receiver/googlecloudpubsubreceiver/receiver.go b/receiver/googlecloudpubsubreceiver/receiver.go index caecafc0135a..9fb36f6b1d6d 100644 --- a/receiver/googlecloudpubsubreceiver/receiver.go +++ b/receiver/googlecloudpubsubreceiver/receiver.go @@ -24,6 +24,7 @@ import ( "go.opentelemetry.io/collector/receiver/receiverhelper" "go.uber.org/zap" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/encoding" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver/internal" ) @@ -44,29 +45,71 @@ type pubsubReceiver struct { startOnce sync.Once } -type 
encoding int +type buildInEncoding int const ( - unknown encoding = iota - otlpProtoTrace = iota - otlpProtoMetric = iota - otlpProtoLog = iota - rawTextLog = iota - cloudLogging = iota + unknown buildInEncoding = iota + otlpProtoTrace = iota + otlpProtoMetric = iota + otlpProtoLog = iota + rawTextLog = iota + cloudLogging = iota ) -type compression int +type buildInCompression int const ( - uncompressed compression = iota - gZip = iota + uncompressed buildInCompression = iota + gZip = iota ) -func (receiver *pubsubReceiver) Start(ctx context.Context, _ component.Host) error { +// consumerCount returns the number of attached consumers, useful for detecting errors in pipelines +func (receiver *pubsubReceiver) consumerCount() int { + count := 0 + if receiver.logsConsumer != nil { + count++ + } + if receiver.metricsConsumer != nil { + count++ + } + if receiver.tracesConsumer != nil { + count++ + } + return count +} + +func (receiver *pubsubReceiver) Start(ctx context.Context, host component.Host) error { if receiver.tracesConsumer == nil && receiver.metricsConsumer == nil && receiver.logsConsumer == nil { return errors.New("cannot start receiver: no consumers were specified") } + var createHandlerFn func(context.Context) error + + if receiver.config.Encoding != "" { + if receiver.consumerCount() > 1 { + return errors.New("cannot start receiver: multiple consumers were attached, but encoding was specified") + } + encodingID := convertEncoding(receiver.config.Encoding) + if encodingID == unknown { + err := receiver.setMarshallerFromExtension(host) + if err != nil { + return err + } + } else { + err := receiver.setMarshallerFromEncodingID(encodingID) + if err != nil { + return err + } + } + createHandlerFn = receiver.createReceiverHandler + } else { + // we will rely on the attributes of the message to determine the signal, so we need all proto unmarshalers + receiver.tracesUnmarshaler = &ptrace.ProtoUnmarshaler{} + receiver.metricsUnmarshaler = 
&pmetric.ProtoUnmarshaler{} + receiver.logsUnmarshaler = &plog.ProtoUnmarshaler{} + createHandlerFn = receiver.createMultiplexingReceiverHandler + } + var startErr error receiver.startOnce.Do(func() { client, err := newSubscriberClient(ctx, receiver.config, receiver.userAgent) @@ -76,18 +119,79 @@ func (receiver *pubsubReceiver) Start(ctx context.Context, _ component.Host) err } receiver.client = client - err = receiver.createReceiverHandler(ctx) + err = createHandlerFn(ctx) if err != nil { startErr = fmt.Errorf("failed to create ReceiverHandler: %w", err) return } }) - receiver.tracesUnmarshaler = &ptrace.ProtoUnmarshaler{} - receiver.metricsUnmarshaler = &pmetric.ProtoUnmarshaler{} - receiver.logsUnmarshaler = &plog.ProtoUnmarshaler{} return startErr } +func (receiver *pubsubReceiver) setMarshallerFromExtension(host component.Host) error { + extensionID := component.ID{} + err := extensionID.UnmarshalText([]byte(receiver.config.Encoding)) + if err != nil { + return errors.New("cannot start receiver: neither a build in encoder, or an extension") + } + extensions := host.GetExtensions() + if extension, ok := extensions[extensionID]; ok { + if receiver.tracesConsumer != nil { + receiver.tracesUnmarshaler, ok = extension.(encoding.TracesUnmarshalerExtension) + if !ok { + return fmt.Errorf("cannot start receiver: extension %q is not a trace unmarshaler", extensionID) + } + } + if receiver.logsConsumer != nil { + receiver.logsUnmarshaler, ok = extension.(encoding.LogsUnmarshalerExtension) + if !ok { + return fmt.Errorf("cannot start receiver: extension %q is not a logs unmarshaler", extensionID) + } + } + if receiver.metricsConsumer != nil { + receiver.metricsUnmarshaler, ok = extension.(encoding.MetricsUnmarshalerExtension) + if !ok { + return fmt.Errorf("cannot start receiver: extension %q is not a metrics unmarshaler", extensionID) + } + } + } else { + return fmt.Errorf("cannot start receiver: extension %q not found", extensionID) + } + return nil +} + +func 
(receiver *pubsubReceiver) setMarshallerFromEncodingID(encodingID buildInEncoding) error { + if receiver.tracesConsumer != nil { + switch encodingID { + case otlpProtoTrace: + receiver.tracesUnmarshaler = &ptrace.ProtoUnmarshaler{} + default: + return fmt.Errorf("cannot start receiver: build in encoding %s is not supported for traces", receiver.config.Encoding) + } + } + if receiver.logsConsumer != nil { + switch encodingID { + case otlpProtoLog: + receiver.logsUnmarshaler = &plog.ProtoUnmarshaler{} + case rawTextLog: + receiver.logsUnmarshaler = unmarshalLogStrings{} + case cloudLogging: + receiver.logsUnmarshaler = unmarshalCloudLoggingLogEntry{} + default: + return fmt.Errorf("cannot start receiver: build in encoding %s is not supported for logs", receiver.config.Encoding) + } + } + if receiver.metricsConsumer != nil { + switch encodingID { + case otlpProtoMetric: + receiver.metricsUnmarshaler = &pmetric.ProtoUnmarshaler{} + default: + return fmt.Errorf("cannot start receiver: build in encoding %s is not supported for metrics", receiver.config.Encoding) + } + } + return nil +} + func (receiver *pubsubReceiver) Shutdown(_ context.Context) error { if receiver.handler != nil { receiver.logger.Info("Stopping Google Pubsub receiver") @@ -103,13 +207,9 @@ func (receiver *pubsubReceiver) Shutdown(_ context.Context) error { return client.Close() } -func (receiver *pubsubReceiver) handleLogStrings(ctx context.Context, message *pubsubpb.ReceivedMessage) error { - if receiver.logsConsumer == nil { - return nil - } - data := string(message.Message.Data) - timestamp := message.GetMessage().PublishTime +type unmarshalLogStrings struct{} +func (unmarshalLogStrings) UnmarshalLogs(data []byte) (plog.Logs, error) { out := plog.NewLogs() logs := out.ResourceLogs() rls := logs.AppendEmpty() @@ -117,22 +217,34 @@ func (receiver *pubsubReceiver) handleLogStrings(ctx context.Context, message *p ills := rls.ScopeLogs().AppendEmpty() lr := ills.LogRecords().AppendEmpty() - 
lr.Body().SetStr(data) - lr.SetTimestamp(pcommon.NewTimestampFromTime(timestamp.AsTime())) + lr.Body().SetStr(string(data)) + return out, nil +} + +func (receiver *pubsubReceiver) handleLogStrings(ctx context.Context, payload []byte) error { + if receiver.logsConsumer == nil { + return nil + } + unmarshall := unmarshalLogStrings{} + out, err := unmarshall.UnmarshalLogs(payload) + if err != nil { + return err + } return receiver.logsConsumer.ConsumeLogs(ctx, out) } -func (receiver *pubsubReceiver) handleCloudLoggingLogEntry(ctx context.Context, message *pubsubpb.ReceivedMessage) error { - resource, lr, err := internal.TranslateLogEntry(ctx, receiver.logger, message.Message.Data) +type unmarshalCloudLoggingLogEntry struct{} + +func (unmarshalCloudLoggingLogEntry) UnmarshalLogs(data []byte) (plog.Logs, error) { + resource, lr, err := internal.TranslateLogEntry(data) + out := plog.NewLogs() lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now())) if err != nil { - receiver.logger.Error("got an error", zap.Error(err)) - return err + return out, err } - out := plog.NewLogs() logs := out.ResourceLogs() rls := logs.AppendEmpty() resource.CopyTo(rls.Resource()) @@ -140,10 +252,10 @@ func (receiver *pubsubReceiver) handleCloudLoggingLogEntry(ctx context.Context, ills := rls.ScopeLogs().AppendEmpty() lr.CopyTo(ills.LogRecords().AppendEmpty()) - return receiver.logsConsumer.ConsumeLogs(ctx, out) + return out, nil } -func decompress(payload []byte, compression compression) ([]byte, error) { +func decompress(payload []byte, compression buildInCompression) ([]byte, error) { if compression == gZip { reader, err := gzip.NewReader(bytes.NewReader(payload)) if err != nil { @@ -154,7 +266,7 @@ func decompress(payload []byte, compression compression) ([]byte, error) { return payload, nil } -func (receiver *pubsubReceiver) handleTrace(ctx context.Context, payload []byte, compression compression) error { +func (receiver *pubsubReceiver) handleTrace(ctx context.Context, payload 
[]byte, compression buildInCompression) error { payload, err := decompress(payload, compression) if err != nil { return err @@ -170,7 +282,7 @@ func (receiver *pubsubReceiver) handleTrace(ctx context.Context, payload []byte, return nil } -func (receiver *pubsubReceiver) handleMetric(ctx context.Context, payload []byte, compression compression) error { +func (receiver *pubsubReceiver) handleMetric(ctx context.Context, payload []byte, compression buildInCompression) error { payload, err := decompress(payload, compression) if err != nil { return err @@ -186,7 +298,7 @@ func (receiver *pubsubReceiver) handleMetric(ctx context.Context, payload []byte return nil } -func (receiver *pubsubReceiver) handleLog(ctx context.Context, payload []byte, compression compression) error { +func (receiver *pubsubReceiver) handleLog(ctx context.Context, payload []byte, compression buildInCompression) error { payload, err := decompress(payload, compression) if err != nil { return err @@ -202,9 +314,9 @@ func (receiver *pubsubReceiver) handleLog(ctx context.Context, payload []byte, c return nil } -func (receiver *pubsubReceiver) detectEncoding(attributes map[string]string) (encoding, compression) { - otlpEncoding := unknown - otlpCompression := uncompressed +func (receiver *pubsubReceiver) detectEncoding(attributes map[string]string) (otlpEncoding buildInEncoding, otlpCompression buildInCompression) { + otlpEncoding = unknown + otlpCompression = uncompressed ceType := attributes["ce-type"] ceContentType := attributes["content-type"] @@ -222,18 +334,7 @@ func (receiver *pubsubReceiver) detectEncoding(attributes map[string]string) (en } if otlpEncoding == unknown && receiver.config.Encoding != "" { - switch receiver.config.Encoding { - case "otlp_proto_trace": - otlpEncoding = otlpProtoTrace - case "otlp_proto_metric": - otlpEncoding = otlpProtoMetric - case "otlp_proto_log": - otlpEncoding = otlpProtoLog - case "cloud_logging": - otlpEncoding = cloudLogging - case "raw_text": - 
otlpEncoding = rawTextLog - } + otlpEncoding = convertEncoding(receiver.config.Encoding) } ceContentEncoding := attributes["content-encoding"] @@ -246,10 +347,26 @@ func (receiver *pubsubReceiver) detectEncoding(attributes map[string]string) (en otlpCompression = gZip } } - return otlpEncoding, otlpCompression + return } -func (receiver *pubsubReceiver) createReceiverHandler(ctx context.Context) error { +func convertEncoding(encodingConfig string) (encoding buildInEncoding) { + switch encodingConfig { + case "otlp_proto_trace": + return otlpProtoTrace + case "otlp_proto_metric": + return otlpProtoMetric + case "otlp_proto_log": + return otlpProtoLog + case "cloud_logging": + return cloudLogging + case "raw_text": + return rawTextLog + } + return unknown +} + +func (receiver *pubsubReceiver) createMultiplexingReceiverHandler(ctx context.Context) error { var err error receiver.handler, err = internal.NewHandler( ctx, @@ -274,16 +391,14 @@ func (receiver *pubsubReceiver) createReceiverHandler(ctx context.Context) error if receiver.logsConsumer != nil { return receiver.handleLog(ctx, payload, compression) } - case cloudLogging: + case rawTextLog: if receiver.logsConsumer != nil { - return receiver.handleCloudLoggingLogEntry(ctx, message) + return receiver.handleLogStrings(ctx, payload) } - case rawTextLog: - return receiver.handleLogStrings(ctx, message) - case unknown: + default: return errors.New("unknown encoding") } - return errors.New("unknown encoding") + return nil }) if err != nil { return err @@ -291,3 +406,40 @@ func (receiver *pubsubReceiver) createReceiverHandler(ctx context.Context) error receiver.handler.RecoverableStream(ctx) return nil } + +func (receiver *pubsubReceiver) createReceiverHandler(ctx context.Context) error { + var err error + var handlerFn func(context.Context, *pubsubpb.ReceivedMessage) error + compression := uncompressed + if receiver.tracesConsumer != nil { + handlerFn = func(ctx context.Context, message *pubsubpb.ReceivedMessage) error 
{ + payload := message.Message.Data + return receiver.handleTrace(ctx, payload, compression) + } + } + if receiver.logsConsumer != nil { + handlerFn = func(ctx context.Context, message *pubsubpb.ReceivedMessage) error { + payload := message.Message.Data + return receiver.handleLog(ctx, payload, compression) + } + } + if receiver.metricsConsumer != nil { + handlerFn = func(ctx context.Context, message *pubsubpb.ReceivedMessage) error { + payload := message.Message.Data + return receiver.handleMetric(ctx, payload, compression) + } + } + + receiver.handler, err = internal.NewHandler( + ctx, + receiver.logger, + receiver.client, + receiver.config.ClientID, + receiver.config.Subscription, + handlerFn) + if err != nil { + return err + } + receiver.handler.RecoverableStream(ctx) + return nil +} diff --git a/receiver/googlecloudpubsubreceiver/receiver_test.go b/receiver/googlecloudpubsubreceiver/receiver_test.go index 01ed07c3f7ae..72eca61b1315 100644 --- a/receiver/googlecloudpubsubreceiver/receiver_test.go +++ b/receiver/googlecloudpubsubreceiver/receiver_test.go @@ -15,6 +15,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/receiver/receiverhelper" "go.opentelemetry.io/collector/receiver/receivertest" "go.uber.org/zap" @@ -24,13 +25,10 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver/testdata" ) -func TestStartReceiverNoSubscription(t *testing.T) { - ctx := context.Background() - // Start a fake server running locally. 
+func createBaseReceiver() (*pstest.Server, *pubsubReceiver) { srv := pstest.NewServer() - defer srv.Close() core, _ := observer.New(zap.WarnLevel) - receiver := &pubsubReceiver{ + return srv, &pubsubReceiver{ logger: zap.New(core), userAgent: "test-user-agent", @@ -44,15 +42,46 @@ func TestStartReceiverNoSubscription(t *testing.T) { Subscription: "projects/my-project/subscriptions/otlp", }, } +} + +type fakeUnmarshalLog struct{} + +func (fakeUnmarshalLog) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (fakeUnmarshalLog) Shutdown(_ context.Context) error { + return nil +} + +func (fakeUnmarshalLog) UnmarshalLogs(_ []byte) (plog.Logs, error) { + return plog.Logs{}, nil +} + +type fakeHost struct{} + +func (fakeHost) GetExtensions() map[component.ID]component.Component { + ext := make(map[component.ID]component.Component) + extensionID := component.ID{} + _ = extensionID.UnmarshalText([]byte("text_encoding")) + ext[extensionID] = fakeUnmarshalLog{} + return ext +} + +func TestStartReceiverNoSubscription(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() defer func() { + assert.NoError(t, srv.Close()) assert.NoError(t, receiver.Shutdown(ctx)) }() + receiver.tracesConsumer = consumertest.NewNop() receiver.metricsConsumer = consumertest.NewNop() receiver.logsConsumer = consumertest.NewNop() // No error is thrown as the stream is handled async, // no locks should be kept though - assert.NoError(t, receiver.Start(ctx, nil)) + assert.NoError(t, receiver.Start(ctx, fakeHost{})) } func TestReceiver(t *testing.T) { @@ -103,14 +132,14 @@ func TestReceiver(t *testing.T) { metricsConsumer: metricSink, logsConsumer: logSink, } - assert.NoError(t, receiver.Start(ctx, nil)) + assert.NoError(t, receiver.Start(ctx, fakeHost{})) receiver.tracesConsumer = traceSink receiver.metricsConsumer = metricSink receiver.logsConsumer = logSink // No error is thrown as the stream is handled async, // no locks should be kept though - 
assert.NoError(t, receiver.Start(ctx, nil)) + assert.NoError(t, receiver.Start(ctx, fakeHost{})) time.Sleep(1 * time.Second) @@ -156,3 +185,125 @@ func TestReceiver(t *testing.T) { assert.NoError(t, receiver.Shutdown(ctx)) assert.NoError(t, receiver.Shutdown(ctx)) } + +func TestEncodingMultipleConsumersForAnEncoding(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, receiver.Shutdown(ctx)) + }() + + receiver.tracesConsumer = consumertest.NewNop() + receiver.metricsConsumer = consumertest.NewNop() + receiver.logsConsumer = consumertest.NewNop() + receiver.config.Encoding = "foo" + assert.ErrorContains(t, receiver.Start(ctx, fakeHost{}), "multiple consumers were attached") +} + +func TestEncodingBuildInProtoTrace(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, receiver.Shutdown(ctx)) + }() + + receiver.tracesConsumer = consumertest.NewNop() + receiver.config.Encoding = "otlp_proto_trace" + + assert.NoError(t, receiver.Start(ctx, fakeHost{})) + assert.NotNil(t, receiver.tracesConsumer) + assert.Nil(t, receiver.metricsConsumer) + assert.Nil(t, receiver.logsConsumer) +} + +func TestEncodingBuildInProtoMetric(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, receiver.Shutdown(ctx)) + }() + + receiver.metricsConsumer = consumertest.NewNop() + receiver.config.Encoding = "otlp_proto_metric" + + assert.NoError(t, receiver.Start(ctx, fakeHost{})) + assert.Nil(t, receiver.tracesConsumer) + assert.NotNil(t, receiver.metricsConsumer) + assert.Nil(t, receiver.logsConsumer) +} + +func TestEncodingBuildInProtoLog(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, 
receiver.Shutdown(ctx)) + }() + + receiver.logsConsumer = consumertest.NewNop() + receiver.config.Encoding = "otlp_proto_log" + + assert.NoError(t, receiver.Start(ctx, fakeHost{})) + assert.Nil(t, receiver.tracesConsumer) + assert.Nil(t, receiver.metricsConsumer) + assert.NotNil(t, receiver.logsConsumer) +} + +func TestEncodingConsumerMismatch(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, receiver.Shutdown(ctx)) + }() + + receiver.tracesConsumer = consumertest.NewNop() + receiver.config.Encoding = "otlp_proto_log" + + assert.ErrorContains(t, receiver.Start(ctx, fakeHost{}), "build in encoding otlp_proto_log is not supported for traces") +} + +func TestEncodingNotFound(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, receiver.Shutdown(ctx)) + }() + + receiver.tracesConsumer = consumertest.NewNop() + receiver.config.Encoding = "foo" + assert.ErrorContains(t, receiver.Start(ctx, fakeHost{}), "extension \"foo\" not found") +} + +func TestEncodingExtension(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, receiver.Shutdown(ctx)) + }() + + receiver.tracesConsumer = consumertest.NewNop() + receiver.config.Encoding = "text_encoding" + assert.ErrorContains(t, receiver.Start(ctx, fakeHost{}), "extension \"text_encoding\" is not a trace unmarshaler") +} + +func TestEncodingExtensionMismatch(t *testing.T) { + ctx := context.Background() + srv, receiver := createBaseReceiver() + defer func() { + assert.NoError(t, srv.Close()) + assert.NoError(t, receiver.Shutdown(ctx)) + }() + + receiver.logsConsumer = consumertest.NewNop() + receiver.config.Encoding = "text_encoding" + assert.NoError(t, receiver.Start(ctx, fakeHost{})) + assert.Nil(t, receiver.tracesConsumer) 
+ assert.Nil(t, receiver.metricsConsumer) + assert.NotNil(t, receiver.logsConsumer) +} diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go index 72641950f094..6b750522373b 100644 --- a/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go @@ -56,7 +56,7 @@ func createMetricsMetadataFromTimestampColumn(query string, timestampColumn stri } } -func createCurrentStatsReaderWithCorruptedMetadata(client *spanner.Client) Reader { //nolint +func createCurrentStatsReaderWithCorruptedMetadata(client *spanner.Client) Reader { query := "SELECT * FROM STATS" databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) @@ -65,7 +65,7 @@ func createCurrentStatsReaderWithCorruptedMetadata(client *spanner.Client) Reade createMetricsMetadataFromTimestampColumn(query, "NOT_EXISTING"), ReaderConfig{}) } -func createCurrentStatsReader(client *spanner.Client) Reader { //nolint +func createCurrentStatsReader(client *spanner.Client) Reader { query := "SELECT * FROM STATS" databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) @@ -73,7 +73,7 @@ func createCurrentStatsReader(client *spanner.Client) Reader { //nolint return newCurrentStatsReader(zap.NewNop(), databaseFromClient, createMetricsMetadata(query), ReaderConfig{}) } -func createCurrentStatsReaderWithMaxRowsLimit(client *spanner.Client) Reader { //nolint +func createCurrentStatsReaderWithMaxRowsLimit(client *spanner.Client) Reader { query := "SELECT * FROM STATS" databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) databaseFromClient := 
datasource.NewDatabaseFromClient(client, databaseID) @@ -84,7 +84,7 @@ func createCurrentStatsReaderWithMaxRowsLimit(client *spanner.Client) Reader { / return newCurrentStatsReader(zap.NewNop(), databaseFromClient, createMetricsMetadata(query), config) } -func createIntervalStatsReaderWithCorruptedMetadata(client *spanner.Client, backfillEnabled bool) Reader { //nolint +func createIntervalStatsReaderWithCorruptedMetadata(client *spanner.Client, backfillEnabled bool) Reader { query := "SELECT * FROM STATS WHERE INTERVAL_END = @pullTimestamp" databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) @@ -96,7 +96,7 @@ func createIntervalStatsReaderWithCorruptedMetadata(client *spanner.Client, back createMetricsMetadataFromTimestampColumn(query, "NOT_EXISTING"), config) } -func createIntervalStatsReader(client *spanner.Client, backfillEnabled bool) Reader { //nolint +func createIntervalStatsReader(client *spanner.Client, backfillEnabled bool) Reader { query := "SELECT * FROM STATS WHERE INTERVAL_END = @pullTimestamp" databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) @@ -107,7 +107,7 @@ func createIntervalStatsReader(client *spanner.Client, backfillEnabled bool) Rea return newIntervalStatsReader(zap.NewNop(), databaseFromClient, createMetricsMetadata(query), config) } -func createIntervalStatsReaderWithMaxRowsLimit(client *spanner.Client, backfillEnabled bool) Reader { //nolint +func createIntervalStatsReaderWithMaxRowsLimit(client *spanner.Client, backfillEnabled bool) Reader { query := "SELECT * FROM STATS WHERE INTERVAL_END = @pullTimestamp" databaseID := datasource.NewDatabaseID(projectID, instanceID, databaseName) databaseFromClient := datasource.NewDatabaseFromClient(client, databaseID) diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile.go 
b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile.go index 385e552df87a..005030ae050d 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile.go @@ -8,5 +8,5 @@ type pageFileStats struct { usedBytes uint64 freeBytes uint64 totalBytes uint64 - cachedBytes *uint64 //nolint:unused + cachedBytes *uint64 } diff --git a/receiver/httpcheckreceiver/README.md b/receiver/httpcheckreceiver/README.md index 3ede7c79de99..d047236432bf 100644 --- a/receiver/httpcheckreceiver/README.md +++ b/receiver/httpcheckreceiver/README.md @@ -35,26 +35,46 @@ The following configuration settings are available: Each target has the following properties: -- `endpoint` (required): the URL to be monitored -- `method` (optional, default: `GET`): The HTTP method used to call the endpoint +- `endpoint` (optional): A single URL to be monitored. +- `endpoints` (optional): A list of URLs to be monitored. +- `method` (optional, default: `GET`): The HTTP method used to call the endpoint or endpoints. -Additionally, each target supports the client configuration options of [confighttp]. +At least one of `endpoint` or `endpoints` must be specified. Additionally, each target supports the client configuration options of [confighttp]. 
### Example Configuration ```yaml receivers: httpcheck: + collection_interval: 30s targets: - - endpoint: http://endpoint:80 - method: GET - - endpoint: http://localhost:8080/health - method: GET - - endpoint: http://localhost:8081/health - method: POST + - method: "GET" + endpoints: + - "https://opentelemetry.io" + - method: "GET" + endpoints: + - "http://localhost:8080/hello1" + - "http://localhost:8080/hello2" headers: - test-header: "test-value" - collection_interval: 10s + Authorization: "Bearer " + - method: "GET" + endpoint: "http://localhost:8080/hello" + headers: + Authorization: "Bearer " +processors: + batch: + send_batch_max_size: 1000 + send_batch_size: 100 + timeout: 10s +exporters: + debug: + verbosity: detailed +service: + pipelines: + metrics: + receivers: [httpcheck] + processors: [batch] + exporters: [debug] ``` ## Metrics diff --git a/receiver/httpcheckreceiver/config.go b/receiver/httpcheckreceiver/config.go index e51da527952c..6bbd7ce963bc 100644 --- a/receiver/httpcheckreceiver/config.go +++ b/receiver/httpcheckreceiver/config.go @@ -17,8 +17,8 @@ import ( // Predefined error responses for configuration validation failures var ( - errMissingEndpoint = errors.New(`"endpoint" must be specified`) errInvalidEndpoint = errors.New(`"endpoint" must be in the form of ://[:]`) + errMissingEndpoint = errors.New("at least one of 'endpoint' or 'endpoints' must be specified") ) // Config defines the configuration for the various elements of the receiver agent. @@ -28,20 +28,32 @@ type Config struct { Targets []*targetConfig `mapstructure:"targets"` } +// targetConfig defines configuration for individual HTTP checks. 
type targetConfig struct { confighttp.ClientConfig `mapstructure:",squash"` - Method string `mapstructure:"method"` + Method string `mapstructure:"method"` + Endpoints []string `mapstructure:"endpoints"` // Field for a list of endpoints } -// Validate validates the configuration by checking for missing or invalid fields +// Validate validates an individual targetConfig. func (cfg *targetConfig) Validate() error { var err error - if cfg.Endpoint == "" { + // Ensure at least one of 'endpoint' or 'endpoints' is specified. + if cfg.ClientConfig.Endpoint == "" && len(cfg.Endpoints) == 0 { err = multierr.Append(err, errMissingEndpoint) - } else { - _, parseErr := url.ParseRequestURI(cfg.Endpoint) - if parseErr != nil { + } + + // Validate the single endpoint in ClientConfig. + if cfg.ClientConfig.Endpoint != "" { + if _, parseErr := url.ParseRequestURI(cfg.ClientConfig.Endpoint); parseErr != nil { + err = multierr.Append(err, fmt.Errorf("%s: %w", errInvalidEndpoint.Error(), parseErr)) + } + } + + // Validate each endpoint in the Endpoints list. + for _, endpoint := range cfg.Endpoints { + if _, parseErr := url.ParseRequestURI(endpoint); parseErr != nil { err = multierr.Append(err, fmt.Errorf("%s: %w", errInvalidEndpoint.Error(), parseErr)) } } @@ -49,14 +61,16 @@ func (cfg *targetConfig) Validate() error { return err } -// Validate validates the configuration by checking for missing or invalid fields +// Validate validates the top-level Config by checking each targetConfig. func (cfg *Config) Validate() error { var err error + // Ensure at least one target is configured. if len(cfg.Targets) == 0 { err = multierr.Append(err, errors.New("no targets configured")) } + // Validate each targetConfig. 
for _, target := range cfg.Targets { err = multierr.Append(err, target.Validate()) } diff --git a/receiver/httpcheckreceiver/config_test.go b/receiver/httpcheckreceiver/config_test.go index e7140f236ecd..b98b60110bd2 100644 --- a/receiver/httpcheckreceiver/config_test.go +++ b/receiver/httpcheckreceiver/config_test.go @@ -105,6 +105,98 @@ func TestValidate(t *testing.T) { }, expectedErr: nil, }, + { + desc: "missing both endpoint and endpoints", + cfg: &Config{ + Targets: []*targetConfig{ + { + ClientConfig: confighttp.ClientConfig{}, + }, + }, + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + }, + expectedErr: multierr.Combine( + errMissingEndpoint, + ), + }, + { + desc: "invalid single endpoint", + cfg: &Config{ + Targets: []*targetConfig{ + { + ClientConfig: confighttp.ClientConfig{ + Endpoint: "invalid://endpoint: 12efg", + }, + }, + }, + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + }, + expectedErr: multierr.Combine( + fmt.Errorf("%w: %s", errInvalidEndpoint, `parse "invalid://endpoint: 12efg": invalid port ": 12efg" after host`), + ), + }, + { + desc: "invalid endpoint in endpoints list", + cfg: &Config{ + Targets: []*targetConfig{ + { + Endpoints: []string{ + "https://valid.endpoint", + "invalid://endpoint: 12efg", + }, + }, + }, + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + }, + expectedErr: multierr.Combine( + fmt.Errorf("%w: %s", errInvalidEndpoint, `parse "invalid://endpoint: 12efg": invalid port ": 12efg" after host`), + ), + }, + { + desc: "missing scheme in single endpoint", + cfg: &Config{ + Targets: []*targetConfig{ + { + ClientConfig: confighttp.ClientConfig{ + Endpoint: "www.opentelemetry.io/docs", + }, + }, + }, + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + }, + expectedErr: multierr.Combine( + fmt.Errorf("%w: %s", errInvalidEndpoint, `parse "www.opentelemetry.io/docs": invalid URI for request`), + ), + }, + { + desc: "valid single endpoint", + cfg: &Config{ + Targets: 
[]*targetConfig{ + { + ClientConfig: confighttp.ClientConfig{ + Endpoint: "https://opentelemetry.io", + }, + }, + }, + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + }, + expectedErr: nil, + }, + { + desc: "valid endpoints list", + cfg: &Config{ + Targets: []*targetConfig{ + { + Endpoints: []string{ + "https://opentelemetry.io", + "https://opentelemetry.io:80/docs", + }, + }, + }, + ControllerConfig: scraperhelper.NewDefaultControllerConfig(), + }, + expectedErr: nil, + }, } for _, tc := range testCases { diff --git a/receiver/httpcheckreceiver/scraper.go b/receiver/httpcheckreceiver/scraper.go index e464410f8a15..3408800136da 100644 --- a/receiver/httpcheckreceiver/scraper.go +++ b/receiver/httpcheckreceiver/scraper.go @@ -32,19 +32,43 @@ type httpcheckScraper struct { mb *metadata.MetricsBuilder } -// start starts the scraper by creating a new HTTP Client on the scraper +// start initializes the scraper by creating HTTP clients for each endpoint. func (h *httpcheckScraper) start(ctx context.Context, host component.Host) (err error) { + var expandedTargets []*targetConfig + for _, target := range h.cfg.Targets { - client, clentErr := target.ToClient(ctx, host, h.settings) - if clentErr != nil { - err = multierr.Append(err, clentErr) + // Create a unified list of endpoints + var allEndpoints []string + if len(target.Endpoints) > 0 { + allEndpoints = append(allEndpoints, target.Endpoints...) 
// Add all endpoints + } + if target.ClientConfig.Endpoint != "" { + allEndpoints = append(allEndpoints, target.ClientConfig.Endpoint) // Add single endpoint + } + + // Process each endpoint in the unified list + for _, endpoint := range allEndpoints { + client, clientErr := target.ToClient(ctx, host, h.settings) + if clientErr != nil { + h.settings.Logger.Error("failed to initialize HTTP client", zap.String("endpoint", endpoint), zap.Error(clientErr)) + err = multierr.Append(err, clientErr) + continue + } + + // Clone the target and assign the specific endpoint + targetClone := *target + targetClone.ClientConfig.Endpoint = endpoint + + h.clients = append(h.clients, client) + expandedTargets = append(expandedTargets, &targetClone) // Add the cloned target to expanded targets } - h.clients = append(h.clients, client) } + + h.cfg.Targets = expandedTargets // Replace targets with expanded targets return } -// scrape connects to the endpoint and produces metrics based on the response +// scrape performs the HTTP checks and records metrics based on responses. 
func (h *httpcheckScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { if len(h.clients) == 0 { return pmetric.NewMetrics(), errClientNotInit @@ -60,29 +84,64 @@ func (h *httpcheckScraper) scrape(ctx context.Context) (pmetric.Metrics, error) now := pcommon.NewTimestampFromTime(time.Now()) - req, err := http.NewRequestWithContext(ctx, h.cfg.Targets[targetIndex].Method, h.cfg.Targets[targetIndex].Endpoint, http.NoBody) + req, err := http.NewRequestWithContext( + ctx, + h.cfg.Targets[targetIndex].Method, + h.cfg.Targets[targetIndex].ClientConfig.Endpoint, // Use the ClientConfig.Endpoint + http.NoBody, + ) if err != nil { h.settings.Logger.Error("failed to create request", zap.Error(err)) return } + // Add headers to the request + for key, value := range h.cfg.Targets[targetIndex].Headers { + req.Header.Set(key, value.String()) // Convert configopaque.String to string + } + + // Send the request and measure response time start := time.Now() resp, err := targetClient.Do(req) mux.Lock() - h.mb.RecordHttpcheckDurationDataPoint(now, time.Since(start).Milliseconds(), h.cfg.Targets[targetIndex].Endpoint) + h.mb.RecordHttpcheckDurationDataPoint( + now, + time.Since(start).Milliseconds(), + h.cfg.Targets[targetIndex].ClientConfig.Endpoint, // Use the correct endpoint + ) statusCode := 0 if err != nil { - h.mb.RecordHttpcheckErrorDataPoint(now, int64(1), h.cfg.Targets[targetIndex].Endpoint, err.Error()) + h.mb.RecordHttpcheckErrorDataPoint( + now, + int64(1), + h.cfg.Targets[targetIndex].ClientConfig.Endpoint, + err.Error(), + ) } else { statusCode = resp.StatusCode } + // Record HTTP status class metrics for class, intVal := range httpResponseClasses { if statusCode/100 == intVal { - h.mb.RecordHttpcheckStatusDataPoint(now, int64(1), h.cfg.Targets[targetIndex].Endpoint, int64(statusCode), req.Method, class) + h.mb.RecordHttpcheckStatusDataPoint( + now, + int64(1), + h.cfg.Targets[targetIndex].ClientConfig.Endpoint, + int64(statusCode), + req.Method, + class, + ) } 
else { - h.mb.RecordHttpcheckStatusDataPoint(now, int64(0), h.cfg.Targets[targetIndex].Endpoint, int64(statusCode), req.Method, class) + h.mb.RecordHttpcheckStatusDataPoint( + now, + int64(0), + h.cfg.Targets[targetIndex].ClientConfig.Endpoint, + int64(statusCode), + req.Method, + class, + ) } } mux.Unlock() @@ -90,7 +149,6 @@ func (h *httpcheckScraper) scrape(ctx context.Context) (pmetric.Metrics, error) } wg.Wait() - return h.mb.Emit(), nil } diff --git a/receiver/k8sobjectsreceiver/receiver.go b/receiver/k8sobjectsreceiver/receiver.go index c1e82babd5dc..43db8b21ece0 100644 --- a/receiver/k8sobjectsreceiver/receiver.go +++ b/receiver/k8sobjectsreceiver/receiver.go @@ -206,7 +206,7 @@ func (kr *k8sobjectsreceiver) doWatch(ctx context.Context, config *K8sObjectsCon case data, ok := <-res: if data.Type == apiWatch.Error { errObject := apierrors.FromObject(data.Object) - // nolint:errorlint + //nolint:errorlint if errObject.(*apierrors.StatusError).ErrStatus.Code == http.StatusGone { kr.setting.Logger.Info("received a 410, grabbing new resource version", zap.Any("data", data)) // we received a 410 so we need to restart diff --git a/receiver/prometheusremotewritereceiver/receiver.go b/receiver/prometheusremotewritereceiver/receiver.go index 24da216a6ea8..741d43929b39 100644 --- a/receiver/prometheusremotewritereceiver/receiver.go +++ b/receiver/prometheusremotewritereceiver/receiver.go @@ -154,7 +154,8 @@ func (prw *prometheusRemoteWriteReceiver) parseProto(contentType string) (promco // translateV2 translates a v2 remote-write request into OTLP metrics. // translate is not feature complete. 
-// nolint +// +//nolint:unparam func (prw *prometheusRemoteWriteReceiver) translateV2(_ context.Context, req *writev2.Request) (pmetric.Metrics, promremote.WriteResponseStats, error) { var ( badRequestErrors error diff --git a/receiver/receivercreator/config_test.go b/receiver/receivercreator/config_test.go index 2a9a02ec0d74..90533da89476 100644 --- a/receiver/receivercreator/config_test.go +++ b/receiver/receivercreator/config_test.go @@ -147,8 +147,6 @@ func TestInvalidResourceAttributeEndpointType(t *testing.T) { factory := NewFactory() factories.Receivers[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "invalid-resource-attributes.yaml"), factories) require.ErrorContains(t, err, "error reading configuration for \"receiver_creator\": resource attributes for unsupported endpoint type \"not.a.real.type\"") require.Nil(t, cfg) @@ -162,8 +160,6 @@ func TestInvalidReceiverResourceAttributeValueType(t *testing.T) { factory := NewFactory() factories.Receivers[metadata.Type] = factory - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "invalid-receiver-resource-attributes.yaml"), factories) require.ErrorContains(t, err, "error reading configuration for \"receiver_creator\": unsupported `resource_attributes` \"one\" value in examplereceiver/1") require.Nil(t, cfg) diff --git a/receiver/snmpreceiver/client.go b/receiver/snmpreceiver/client.go index caab1fc79bcf..03d6755d2538 100644 --- a/receiver/snmpreceiver/client.go +++ b/receiver/snmpreceiver/client.go @@ -324,7 +324,7 @@ func (c *snmpClient) convertSnmpPDUToSnmpData(pdu gosnmp.SnmpPDU) SNMPData { } // Condense gosnmp data types to our client's simplified data types - switch pdu.Type { // nolint:exhaustive + switch pdu.Type { // 
Integer types case gosnmp.Counter64, gosnmp.Counter32, gosnmp.Gauge32, gosnmp.Uinteger32, gosnmp.TimeTicks, gosnmp.Integer: value, err := c.toInt64(pdu.Name, pdu.Value) diff --git a/receiver/snmpreceiver/integration_test.go b/receiver/snmpreceiver/integration_test.go index 63a941e2c0e8..2be794bb5efe 100644 --- a/receiver/snmpreceiver/integration_test.go +++ b/receiver/snmpreceiver/integration_test.go @@ -28,8 +28,6 @@ import ( func TestIntegration(t *testing.T) { t.Skip("Broken test, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/36177") - // remove nolint when https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/24240 is resolved - // nolint:staticcheck testCases := []struct { desc string configFilename string @@ -62,8 +60,6 @@ func TestIntegration(t *testing.T) { factory := NewFactory() factories.Receivers[metadata.Type] = factory configFile := filepath.Join("testdata", "integration", testCase.configFilename) - // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 - // nolint:staticcheck cfg, err := otelcoltest.LoadConfigAndValidate(configFile, factories) require.NoError(t, err) snmpConfig := cfg.Receivers[component.NewID(metadata.Type)].(*Config) diff --git a/receiver/sshcheckreceiver/internal/configssh/configssh.go b/receiver/sshcheckreceiver/internal/configssh/configssh.go index cf13f1636059..dbbd29b1845d 100644 --- a/receiver/sshcheckreceiver/internal/configssh/configssh.go +++ b/receiver/sshcheckreceiver/internal/configssh/configssh.go @@ -103,8 +103,8 @@ func (scs *SSHClientSettings) ToClient(_ component.Host, _ component.TelemetrySe switch { case scs.IgnoreHostKey: - // nolint G106 - hkc = ssh.InsecureIgnoreHostKey() //#nosec G106 + //nolint:gosec // #nosec G106 + hkc = ssh.InsecureIgnoreHostKey() case scs.KnownHosts != "": fn, err := knownhosts.New(scs.KnownHosts) if err != nil { diff --git a/versions.yaml b/versions.yaml index 802e23d45cfe..2f15b1d7eb99 100644 --- 
a/versions.yaml +++ b/versions.yaml @@ -217,6 +217,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/envoyalsreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/expvarreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver