diff --git a/.github/workflows/otelcol-fips.yml b/.github/workflows/otelcol-fips.yml
index c1d6097b8d..98f8690511 100644
--- a/.github/workflows/otelcol-fips.yml
+++ b/.github/workflows/otelcol-fips.yml
@@ -79,3 +79,93 @@ jobs:
         if: matrix.FIPSMODE == '0' && steps.run-otelcol.outcome == 'failure'
       - run: throw "FIPS disabled, should have failed"
         if: matrix.FIPSMODE == '0' && steps.run-otelcol.outcome == 'success'
+
+  docker-otelcol-fips:
+    runs-on: ${{ fromJSON('["ubuntu-20.04", "otel-arm64"]')[matrix.ARCH == 'arm64'] }}
+    needs: [ otelcol-fips ]
+    strategy:
+      matrix:
+        ARCH: [ amd64, arm64 ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache-dependency-path: '**/go.sum'
+      - uses: actions/download-artifact@v4
+        with:
+          name: otelcol-fips-linux-${{ matrix.ARCH }}
+          path: ./bin
+      - run: make docker-otelcol SKIP_COMPILE=true
+        env:
+          FIPS: true
+          ARCH: ${{ matrix.ARCH }}
+      - name: Ensure the collector container can run with the default config
+        run: |
+          docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm otelcol-fips:${{ matrix.ARCH }}
+          sleep 30
+          docker logs otelcol-fips
+          if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
+            exit 1
+          fi
+          docker rm -f otelcol-fips
+      - name: Ensure the collector container can run with all included configs
+        run: |
+          for config in cmd/otelcol/fips/config/*.yaml; do
+            docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm -e SPLUNK_CONFIG=/etc/otel/collector/$(basename $config) otelcol-fips:${{ matrix.ARCH }}
+            sleep 30
+            docker logs otelcol-fips
+            if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
+              exit 1
+            fi
+            docker rm -f otelcol-fips
+          done
+      - run: docker save -o image.tar otelcol-fips:${{ matrix.ARCH }}
+      - uses: actions/upload-artifact@v4
+        with:
+          name: docker-otelcol-fips-${{ matrix.ARCH }}
+          path: ./image.tar
+
+  win-docker-otelcol-fips:
+    runs-on: windows-${{ matrix.WIN_VERSION }}
+    needs: [ otelcol-fips ]
+    strategy:
+      matrix:
+        WIN_VERSION: [ 2019, 2022 ]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - uses: actions/download-artifact@v4
+        with:
+          name: otelcol-fips-windows-amd64
+          path: ./cmd/otelcol/fips/dist
+      - run: docker build --pull -t otelcol-fips:${{ matrix.WIN_VERSION }} --build-arg BASE_IMAGE=${env:BASE_IMAGE} -f .\cmd\otelcol\fips\Dockerfile.windows .\cmd\otelcol\fips
+        env:
+          BASE_IMAGE: mcr.microsoft.com/windows/servercore:ltsc${{ matrix.WIN_VERSION }}
+      - name: Ensure the collector container can run with the default config
+        shell: bash
+        run: |
+          docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm otelcol-fips:${{ matrix.WIN_VERSION }}
+          sleep 30
+          docker logs otelcol-fips
+          if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
+            exit 1
+          fi
+          docker rm -f otelcol-fips
+      - name: Ensure the collector container can run with all included configs
+        shell: bash
+        run: |
+          for config in cmd/otelcol/fips/config/*.yaml; do
+            docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm -e SPLUNK_CONFIG="C:\\ProgramData\\Splunk\\OpenTelemetry Collector\\$(basename $config)" otelcol-fips:${{ matrix.WIN_VERSION }}
+            sleep 30
+            docker logs otelcol-fips
+            if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
+              exit 1
+            fi
+            docker rm -f otelcol-fips
+          done
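Both new jobs use the same smoke test: start the container with placeholder credentials, wait, and fail if it is no longer running. A local equivalent of that loop, sketched in bash (the image tag is an assumption; use whatever tag the build step above produced):

```bash
#!/usr/bin/env bash
# Local sketch of the CI smoke test; assumes the image already exists,
# e.g. otelcol-fips:amd64 from `FIPS=true make docker-otelcol`.
set -euo pipefail
IMAGE="${IMAGE:-otelcol-fips:amd64}"

for config in cmd/otelcol/fips/config/*.yaml; do
  docker run -d --name otelcol-fips \
    -e SPLUNK_ACCESS_TOKEN=fake-token \
    -e SPLUNK_REALM=fake-realm \
    -e SPLUNK_CONFIG="/etc/otel/collector/$(basename "$config")" \
    "$IMAGE"
  sleep 30
  docker logs otelcol-fips
  # Fail if the collector exited during startup.
  if [ -z "$(docker ps --filter=status=running --filter=name=otelcol-fips -q)" ]; then
    docker rm -f otelcol-fips
    exit 1
  fi
  docker rm -f otelcol-fips
done
```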
diff --git a/cmd/otelcol/fips/Dockerfile b/cmd/otelcol/fips/Dockerfile
new file mode 100644
index 0000000000..e6e2f729b1
--- /dev/null
+++ b/cmd/otelcol/fips/Dockerfile
@@ -0,0 +1,30 @@
+ARG DOCKER_REPO=docker.io
+
+FROM ${DOCKER_REPO}/alpine:3.17.0 AS certs
+RUN apk --update add ca-certificates
+
+FROM ${DOCKER_REPO}/alpine:3.17.0 AS otelcol
+ARG TARGETARCH
+COPY --chmod=755 dist/otelcol-fips_linux_${TARGETARCH} /otelcol
+RUN echo "splunk-otel-collector:x:999:999::/:" > /etc_passwd
+# create base dirs since we cannot chown in scratch image except via COPY
+RUN mkdir -p /otel/collector /splunk-otel-collector
+
+FROM scratch
+
+COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+
+COPY --from=otelcol /etc_passwd /etc/passwd
+COPY --from=otelcol --chown=999 /otelcol /
+COPY --from=otelcol --chown=999 /otel /etc/otel
+COPY --from=otelcol --chown=999 /otel/collector /etc/otel/collector
+
+COPY --chown=999 config/gateway_config.yaml /etc/otel/collector/gateway_config.yaml
+COPY --chown=999 config/otlp_config_linux.yaml /etc/otel/collector/otlp_config_linux.yaml
+COPY --chown=999 config/agent_config.yaml /etc/otel/collector/agent_config.yaml
+COPY --chown=999 config/fargate_config.yaml /etc/otel/collector/fargate_config.yaml
+COPY --chown=999 config/ecs_ec2_config.yaml /etc/otel/collector/ecs_ec2_config.yaml
+
+USER splunk-otel-collector
+ENTRYPOINT ["/otelcol"]
+EXPOSE 13133 14250 14268 4317 4318 6060 8006 8888 9411 9443 9080
diff --git a/cmd/otelcol/fips/Dockerfile.windows b/cmd/otelcol/fips/Dockerfile.windows
new file mode 100644
index 0000000000..f7c39e208b
--- /dev/null
+++ b/cmd/otelcol/fips/Dockerfile.windows
@@ -0,0 +1,28 @@
+ARG BASE_IMAGE
+
+FROM ${BASE_IMAGE}
+
+# Setting PowerShell as a default executor.
+SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
+
+# Copy the pre-built local binary
+WORKDIR "C:\Program Files\Splunk\OpenTelemetry Collector"
+COPY dist/otelcol-fips_windows_amd64.exe ./otelcol.exe
+
+# Copy the local config
+WORKDIR "C:\ProgramData\Splunk\OpenTelemetry Collector"
+COPY config/gateway_config.yaml ./
+COPY config/otlp_config_linux.yaml ./
+COPY config/agent_config.yaml ./
+COPY config/fargate_config.yaml ./
+COPY config/ecs_ec2_config.yaml ./
+
+# Enable FIPS
+RUN Set-ItemProperty -Path HKLM:\System\CurrentControlSet\Control\Lsa\FipsAlgorithmPolicy -Name Enabled -Value 1
+
+WORKDIR "C:\Program Files\Splunk\OpenTelemetry Collector"
+
+ENV SPLUNK_CONFIG="C:\ProgramData\Splunk\OpenTelemetry Collector\gateway_config.yaml"
+
+ENTRYPOINT [ "otelcol.exe" ]
+EXPOSE 13133 14250 14268 4317 6060 8888 9411 9443 9080
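The Linux image is built from scratch and ships no shell, so quick structural checks have to go through the Docker CLI. A possible sanity check (image tag is an assumption):

```bash
# Confirm the non-root user and entrypoint baked in by the Dockerfile above.
docker inspect -f 'user={{ .Config.User }} entrypoint={{ json .Config.Entrypoint }}' otelcol-fips:amd64

# No shell in the image: list the filesystem by exporting a stopped container.
docker create --name otelcol-fips-inspect otelcol-fips:amd64
docker export otelcol-fips-inspect | tar -tf - | grep -E '^(otelcol$|etc/otel/collector/)'
docker rm otelcol-fips-inspect
```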
diff --git a/cmd/otelcol/fips/config/agent_config.yaml b/cmd/otelcol/fips/config/agent_config.yaml
new file mode 100644
index 0000000000..10e2ca2611
--- /dev/null
+++ b/cmd/otelcol/fips/config/agent_config.yaml
@@ -0,0 +1,205 @@
+# Default configuration file for the Linux (deb/rpm) and Windows MSI collector packages
+
+# If the collector is installed without the Linux/Windows installer script, the following
+# environment variables are required to be manually defined or configured below:
+# - SPLUNK_ACCESS_TOKEN: The Splunk access token to authenticate requests
+# - SPLUNK_API_URL: The Splunk API URL, e.g. https://api.us0.signalfx.com
+# - SPLUNK_HEC_TOKEN: The Splunk HEC authentication token
+# - SPLUNK_HEC_URL: The Splunk HEC endpoint URL, e.g. https://ingest.us0.signalfx.com/v1/log
+# - SPLUNK_INGEST_URL: The Splunk ingest URL, e.g. https://ingest.us0.signalfx.com
+# - SPLUNK_LISTEN_INTERFACE: The network interface the agent receivers listen on.
+# - SPLUNK_TRACE_URL: The Splunk trace endpoint URL, e.g. https://ingest.us0.signalfx.com/v2/trace
+
+extensions:
+  health_check:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:13133"
+  http_forwarder:
+    ingress:
+      endpoint: "${SPLUNK_LISTEN_INTERFACE}:6060"
+    egress:
+      endpoint: "${SPLUNK_API_URL}"
+      # Use instead when sending to gateway
+      #endpoint: "${SPLUNK_GATEWAY_URL}"
+  zpages:
+    #endpoint: "${SPLUNK_LISTEN_INTERFACE}:55679"
+
+receivers:
+  fluentforward:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:8006"
+  hostmetrics:
+    collection_interval: 10s
+    scrapers:
+      cpu:
+      disk:
+      filesystem:
+      memory:
+      network:
+      # System load average metrics https://en.wikipedia.org/wiki/Load_(computing)
+      load:
+      # Paging/Swap space utilization and I/O metrics
+      paging:
+      # Aggregated system process count metrics
+      processes:
+      # System processes metrics, disabled by default
+      # process:
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:14250"
+      thrift_binary:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:6832"
+      thrift_compact:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:6831"
+      thrift_http:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:14268"
+  otlp:
+    protocols:
+      grpc:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:4317"
+      http:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:4318"
+  # This section is used to collect the OpenTelemetry Collector metrics
+  # Even if just a Splunk APM customer, these metrics are included
+  prometheus/internal:
+    config:
+      scrape_configs:
+      - job_name: 'otel-collector'
+        scrape_interval: 10s
+        static_configs:
+        - targets: ["${SPLUNK_LISTEN_INTERFACE}:8888"]
+        metric_relabel_configs:
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_rpc_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_http_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_processor_batch_.*'
+            action: drop
+  signalfx:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:9943"
+    # Whether to preserve incoming access token and use instead of exporter token
+    # default = false
+    #access_token_passthrough: true
+  zipkin:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:9411"
+  nop:
+
+processors:
+  batch:
+  # Enabling the memory_limiter is strongly recommended for every pipeline.
+  # Configuration is based on the amount of memory allocated to the collector.
+  # For more information about memory limiter, see
+  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md
+  memory_limiter:
+    check_interval: 2s
+    limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+
+  # Detect if the collector is running on a cloud system, which is important for creating unique cloud provider dimensions.
+  # Detector order is important: the `system` detector goes last so it can't preclude cloud detectors from setting host/os info.
+  # Resource detection processor is configured to override all host and cloud attributes because instrumentation
+  # libraries can send wrong values from container environments.
+  # https://docs.splunk.com/Observability/gdi/opentelemetry/components/resourcedetection-processor.html#ordering-considerations
+  resourcedetection:
+    detectors: [gcp, ecs, ec2, azure, system]
+    override: true
+
+  # Optional: The following processor can be used to add a default "deployment.environment" attribute to the logs and
+  # traces when it's not populated by instrumentation libraries.
+  # If enabled, make sure to enable this processor in a pipeline.
+  # For more information, see https://docs.splunk.com/Observability/gdi/opentelemetry/components/resource-processor.html
+  #resource/add_environment:
+    #attributes:
+      #- action: insert
+        #value: staging/production/...
+        #key: deployment.environment
+
+exporters:
+  # Traces
+  sapm:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "${SPLUNK_TRACE_URL}"
+  # Metrics + Events
+  signalfx:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    api_url: "${SPLUNK_API_URL}"
+    ingest_url: "${SPLUNK_INGEST_URL}"
+    # Use instead when sending to gateway
+    #api_url: http://${SPLUNK_GATEWAY_URL}:6060
+    #ingest_url: http://${SPLUNK_GATEWAY_URL}:9943
+    sync_host_metadata: true
+    correlation:
+  # Entities (applicable only if discovery mode is enabled)
+  otlphttp/entities:
+    logs_endpoint: "${SPLUNK_INGEST_URL}/v3/event"
+    headers:
+      "X-SF-Token": "${SPLUNK_ACCESS_TOKEN}"
+  # Logs
+  splunk_hec:
+    token: "${SPLUNK_HEC_TOKEN}"
+    endpoint: "${SPLUNK_HEC_URL}"
+    source: "otel"
+    sourcetype: "otel"
+    profiling_data_enabled: false
+  # Profiling
+  splunk_hec/profiling:
+    token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "${SPLUNK_INGEST_URL}/v1/log"
+    log_data_enabled: false
+  # Send to gateway
+  otlp:
+    endpoint: "${SPLUNK_GATEWAY_URL}:4317"
+    tls:
+      insecure: true
+  # Debug
+  debug:
+    verbosity: detailed
+
+service:
+  telemetry:
+    metrics:
+      address: "${SPLUNK_LISTEN_INTERFACE}:8888"
+  extensions: [health_check, http_forwarder, zpages]
+  pipelines:
+    traces:
+      receivers: [jaeger, otlp, zipkin]
+      processors:
+      - memory_limiter
+      - batch
+      - resourcedetection
+      #- resource/add_environment
+      exporters: [sapm, signalfx]
+      # Use instead when sending to gateway
+      #exporters: [otlp, signalfx]
+    metrics:
+      receivers: [hostmetrics, otlp, signalfx]
+      processors: [memory_limiter, batch, resourcedetection]
+      exporters: [signalfx]
+      # Use instead when sending to gateway
+      #exporters: [otlp]
+    metrics/internal:
+      receivers: [prometheus/internal]
+      processors: [memory_limiter, batch, resourcedetection]
+      # When sending to gateway, at least one metrics pipeline needs
+      # to use signalfx exporter so host metadata gets emitted
+      exporters: [signalfx]
+    logs/signalfx:
+      receivers: [signalfx]
+      processors: [memory_limiter, batch, resourcedetection]
+      exporters: [signalfx]
+    logs/entities:
+      # Receivers are dynamically added if discovery mode is enabled
+      receivers: [nop]
+      processors: [memory_limiter, batch, resourcedetection]
+      exporters: [otlphttp/entities]
+    logs:
+      receivers: [fluentforward, otlp]
+      processors:
+      - memory_limiter
+      - batch
+      - resourcedetection
+      #- resource/add_environment
+      exporters: [splunk_hec, splunk_hec/profiling]
+      # Use instead when sending to gateway
+      #exporters: [otlp]
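A minimal sketch of running the FIPS image in agent mode with this config. It assumes the binary derives SPLUNK_API_URL, SPLUNK_INGEST_URL, and the other realm-based URLs from SPLUNK_REALM the way the standard distribution does, so only the values called out in the header comment are spelled out; token, realm, and image tag are placeholders:

```bash
docker run -d --name splunk-otel-collector-fips \
  -e SPLUNK_ACCESS_TOKEN=REDACTED \
  -e SPLUNK_REALM=us0 \
  -e SPLUNK_LISTEN_INTERFACE=0.0.0.0 \
  -e SPLUNK_CONFIG=/etc/otel/collector/agent_config.yaml \
  -p 4317:4317 -p 4318:4318 -p 13133:13133 \
  otelcol-fips:amd64
```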
diff --git a/cmd/otelcol/fips/config/ecs_ec2_config.yaml b/cmd/otelcol/fips/config/ecs_ec2_config.yaml
new file mode 100644
index 0000000000..76101349d4
--- /dev/null
+++ b/cmd/otelcol/fips/config/ecs_ec2_config.yaml
@@ -0,0 +1,164 @@
+# This collector config file is designed for use within an ECS task.
+# The collector should run in a sidecar container within an ECS task.
+config_sources:
+  env:
+    defaults:
+      METRICS_TO_EXCLUDE: []
+      ECS_METADATA_EXCLUDED_IMAGES: []
+      ECS_TASK_METADATA_ENDPOINT: "${ECS_CONTAINER_METADATA_URI_V4}/task"
+      ECS_TASK_STATS_ENDPOINT: "${ECS_CONTAINER_METADATA_URI_V4}/task/stats"
+
+extensions:
+  health_check:
+    endpoint: 0.0.0.0:13133
+  http_forwarder:
+    ingress:
+      endpoint: 0.0.0.0:6060
+    egress:
+      endpoint: "https://api.${SPLUNK_REALM}.signalfx.com"
+  zpages:
+    endpoint: 0.0.0.0:55679
+
+receivers:
+  # The fluentforward receiver can be used to forward logs from the Docker fluentd logging driver.
+  fluentforward:
+    endpoint: 0.0.0.0:8006
+  hostmetrics:
+    collection_interval: 10s
+    scrapers:
+      cpu:
+      disk:
+      filesystem:
+      memory:
+      network:
+      # System load average metrics https://en.wikipedia.org/wiki/Load_(computing)
+      load:
+      # Paging/Swap space utilization and I/O metrics
+      paging:
+      # Aggregated system process count metrics
+      processes:
+      # System processes metrics, disabled by default
+      # process:
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:14250
+      thrift_http:
+        endpoint: 0.0.0.0:14268
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  # This section is used to collect the OpenTelemetry Collector metrics
+  # Even if just a Splunk APM customer, these metrics are included
+  prometheus/internal:
+    config:
+      scrape_configs:
+      - job_name: 'otel-collector'
+        scrape_interval: 10s
+        static_configs:
+        - targets: ['0.0.0.0:8888']
+        metric_relabel_configs:
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_rpc_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_http_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_processor_batch_.*'
+            action: drop
+  signalfx:
+    endpoint: 0.0.0.0:9943
+    # Whether to preserve incoming access token and use instead of exporter token
+    # default = false
+    #access_token_passthrough: true
+  zipkin:
+    endpoint: 0.0.0.0:9411
+
+processors:
+  batch:
+  # Enabling the memory_limiter is strongly recommended for every pipeline.
+  # Configuration is based on the amount of memory allocated to the collector.
+  # For more information about memory limiter, see
+  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md
+  memory_limiter:
+    check_interval: 2s
+    limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+  # detect if the collector is running on a cloud system
+  # important for creating unique cloud provider dimensions
+  resourcedetection:
+    detectors: [ecs]
+    override: false
+  # Same as above but overrides resource attributes set by receivers
+  resourcedetection/internal:
+    detectors: [ecs]
+    override: true
+  # Defines the filter processor with example settings
+  # Full configuration here: https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor/filterprocessor
+  filter:
+    metrics:
+      exclude:
+        match_type: regexp
+        metric_names: ${env:METRICS_TO_EXCLUDE}
+#  # Optional: The following processor can be used to add a default "deployment.environment" attribute to the logs and
+#  # traces when it's not populated by instrumentation libraries.
+#  # If enabled, make sure to enable this processor in the pipeline below.
+#  resource/add_environment:
+#    attributes:
+#      - action: insert
+#        value: staging/production/...
+#        key: deployment.environment
+
+exporters:
+  # Traces
+  sapm:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "https://ingest.${SPLUNK_REALM}.signalfx.com/v2/trace"
+  # Metrics + Events
+  signalfx:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    realm: "${SPLUNK_REALM}"
+    correlation:
+  # Logs
+  splunk_hec:
+    token: "${SPLUNK_HEC_TOKEN}"
+    endpoint: "${SPLUNK_HEC_URL}"
+    source: "otel"
+    sourcetype: "otel"
+    profiling_data_enabled: false
+  # Profiling
+  splunk_hec/profiling:
+    token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "${SPLUNK_INGEST_URL}/v1/log"
+    log_data_enabled: false
+
+service:
+  extensions: [health_check, http_forwarder, zpages]
+  pipelines:
+    traces:
+      receivers: [jaeger, otlp, zipkin]
+      processors:
+      - memory_limiter
+      - batch
+      - resourcedetection
+      #- resource/add_environment
+      exporters: [sapm, signalfx]
+    metrics:
+      receivers: [hostmetrics, otlp, signalfx]
+      processors: [memory_limiter, batch, filter, resourcedetection]
+      exporters: [signalfx]
+    metrics/internal:
+      receivers: [prometheus/internal]
+      processors: [memory_limiter, batch, filter, resourcedetection/internal]
+      exporters: [signalfx]
+    logs:
+      receivers: [otlp, fluentforward]
+      processors:
+      - memory_limiter
+      - batch
+      - resourcedetection
+      #- resource/add_environment
+      exporters: [splunk_hec, splunk_hec/profiling]
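Because METRICS_TO_EXCLUDE comes from the env config source with an empty-list default, it can be overridden per container without editing the file. A sketch of launching the sidecar locally with an exclusion list (token, realm, HEC settings, and image tag are placeholders):

```bash
docker run -d --name otelcol-fips-ecs \
  -e SPLUNK_ACCESS_TOKEN=REDACTED \
  -e SPLUNK_REALM=us0 \
  -e SPLUNK_HEC_TOKEN=REDACTED \
  -e SPLUNK_CONFIG=/etc/otel/collector/ecs_ec2_config.yaml \
  -e METRICS_TO_EXCLUDE='["^otelcol_exporter_.*"]' \
  otelcol-fips:amd64
```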
diff --git a/cmd/otelcol/fips/config/fargate_config.yaml b/cmd/otelcol/fips/config/fargate_config.yaml
new file mode 100644
index 0000000000..45207b9b8a
--- /dev/null
+++ b/cmd/otelcol/fips/config/fargate_config.yaml
@@ -0,0 +1,136 @@
+# This collector config file is designed for use within an ECS task.
+# The collector should run in a sidecar container within an ECS task.
+config_sources:
+  env:
+    defaults:
+      METRICS_TO_EXCLUDE: []
+      ECS_METADATA_EXCLUDED_IMAGES: []
+
+extensions:
+  health_check:
+    endpoint: 0.0.0.0:13133
+  http_forwarder:
+    ingress:
+      endpoint: 0.0.0.0:6060
+    egress:
+      endpoint: "https://api.${SPLUNK_REALM}.signalfx.com"
+  zpages:
+    endpoint: 0.0.0.0:55679
+
+receivers:
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:14250
+      thrift_http:
+        endpoint: 0.0.0.0:14268
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  # This section is used to collect the OpenTelemetry Collector metrics
+  # Even if just a Splunk APM customer, these metrics are included
+  prometheus/internal:
+    config:
+      scrape_configs:
+      - job_name: 'otel-collector'
+        scrape_interval: 10s
+        static_configs:
+        - targets: ['0.0.0.0:8888']
+        metric_relabel_configs:
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_rpc_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_http_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_processor_batch_.*'
+            action: drop
+  signalfx:
+    endpoint: 0.0.0.0:9943
+    # Whether to preserve incoming access token and use instead of exporter token
+    # default = false
+    #access_token_passthrough: true
+  zipkin:
+    endpoint: 0.0.0.0:9411
+
+processors:
+  batch:
+  # Enabling the memory_limiter is strongly recommended for every pipeline.
+  # Configuration is based on the amount of memory allocated to the collector.
+  # For more information about memory limiter, see
+  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md
+  memory_limiter:
+    check_interval: 2s
+    limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+  # detect if the collector is running on a cloud system
+  # important for creating unique cloud provider dimensions
+  resourcedetection:
+    detectors: [ecs]
+    override: false
+  # Enables the filter processor with example settings
+  # Full configuration here: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/filterprocessor
+  # NOTE: These settings need to be changed when using this processor
+  filter/1:
+    metrics:
+      exclude:
+        match_type: regexp
+        metric_names: ${env:METRICS_TO_EXCLUDE}
+  # Optional: The following processor can be used to add a default "deployment.environment" attribute to the logs and
+  # traces when it's not populated by instrumentation libraries.
+  # If enabled, make sure to enable this processor in the pipeline below.
+  #resource/add_environment:
+    #attributes:
+      #- action: insert
+        #value: staging/production/...
+        #key: deployment.environment
+
+exporters:
+  # Traces
+  sapm:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "https://ingest.${SPLUNK_REALM}.signalfx.com/v2/trace"
+  # Metrics + Events
+  signalfx:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    realm: "${SPLUNK_REALM}"
+    correlation:
+  # Logs
+  splunk_hec:
+    token: "${SPLUNK_HEC_TOKEN}"
+    endpoint: "${SPLUNK_HEC_URL}"
+    source: "otel"
+    sourcetype: "otel"
+    profiling_data_enabled: false
+  # Profiling
+  splunk_hec/profiling:
+    token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "${SPLUNK_INGEST_URL}/v1/log"
+    log_data_enabled: false
+
+service:
+  extensions: [health_check, http_forwarder, zpages]
+  pipelines:
+    traces:
+      receivers: [jaeger, otlp, zipkin]
+      processors:
+      - memory_limiter
+      - batch
+      - resourcedetection
+      #- resource/add_environment
+      exporters: [sapm, signalfx]
+    metrics:
+      receivers: [otlp, signalfx, prometheus/internal]
+      processors: [memory_limiter, batch, resourcedetection]
+      exporters: [signalfx]
+    logs:
+      receivers: [otlp]
+      processors:
+      - memory_limiter
+      - batch
+      - resourcedetection
+      #- resource/add_environment
+      exporters: [splunk_hec, splunk_hec/profiling]
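In a Fargate task the sidecar shares the task's network namespace, so application containers reach it over localhost. A minimal liveness probe against the health_check extension configured above, run from another container in the same task (default path assumed):

```bash
# health_check listens on 13133; OTLP/HTTP traces would likewise go to localhost:4318.
curl -sf http://localhost:13133/ && echo "collector healthy"
```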
diff --git a/cmd/otelcol/fips/config/gateway_config.yaml b/cmd/otelcol/fips/config/gateway_config.yaml
new file mode 100644
index 0000000000..391e707fe0
--- /dev/null
+++ b/cmd/otelcol/fips/config/gateway_config.yaml
@@ -0,0 +1,155 @@
+# Configuration file that uses the Splunk exporters (SAPM, SignalFx) to push
+# data to Splunk products.
+
+extensions:
+  health_check:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:13133"
+  http_forwarder:
+    ingress:
+      endpoint: "${SPLUNK_LISTEN_INTERFACE}:6060"
+    egress:
+      endpoint: "https://api.${SPLUNK_REALM}.signalfx.com"
+  zpages:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:55679"
+
+receivers:
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:14250"
+      thrift_binary:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:6832"
+      thrift_compact:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:6831"
+      thrift_http:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:14268"
+  otlp:
+    protocols:
+      grpc:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:4317"
+      http:
+        endpoint: "${SPLUNK_LISTEN_INTERFACE}:4318"
+  # This section is used to collect the OpenTelemetry Collector metrics
+  # Even if just a Splunk APM customer, these metrics are included
+  prometheus/internal:
+    config:
+      scrape_configs:
+      - job_name: 'otel-collector'
+        scrape_interval: 10s
+        static_configs:
+        - targets: ['${SPLUNK_LISTEN_INTERFACE}:8888']
+        metric_relabel_configs:
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_rpc_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_http_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_processor_batch_.*'
+            action: drop
+  sapm:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:7276"
+    # Whether to preserve incoming access token and use instead of exporter token
+    # default = false
+    #access_token_passthrough: true
+  signalfx:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:9943"
+    # Whether to preserve incoming access token and use instead of exporter token
+    # default = false
+    #access_token_passthrough: true
+  zipkin:
+    endpoint: "${SPLUNK_LISTEN_INTERFACE}:9411"
+
+processors:
+  batch:
+  # Enabling the memory_limiter is strongly recommended for every pipeline.
+  # Configuration is based on the amount of memory allocated to the collector.
+  # For more information about memory limiter, see
+  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md
+  memory_limiter:
+    check_interval: 2s
+    limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+
+  # Optional: The following processor can be used to add a default "deployment.environment" attribute to the traces
+  # when it's not populated by instrumentation libraries.
+  # If enabled, make sure to enable this processor in the pipeline below.
+  #resource/add_environment:
+    #attributes:
+      #- action: insert
+        #value: staging/production/...
+        #key: deployment.environment
+
+  # Detect if the collector is running on a cloud system. Overrides resource attributes set by receivers.
+  # Detector order is important: the `system` detector goes last so it can't preclude cloud detectors from setting host/os info.
+  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#ordering
+  resourcedetection/internal:
+    detectors: [gcp, ecs, ec2, azure, system]
+    override: true
+
+exporters:
+  # Traces
+  sapm:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "https://ingest.${SPLUNK_REALM}.signalfx.com/v2/trace"
+    sending_queue:
+      num_consumers: 32
+  # Metrics + Events
+  signalfx:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    realm: "${SPLUNK_REALM}"
+    sending_queue:
+      num_consumers: 32
+    ## Uncomment below if your agents are sending via signalfx exporter
+    ## to avoid double translations and exclusions.
+    #translation_rules: []
+    #exclude_metrics: []
+  signalfx/internal:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    realm: "${SPLUNK_REALM}"
+    sync_host_metadata: true
+  # Debug
+  #debug:
+    #verbosity: detailed
+  # Logs
+  splunk_hec:
+    token: "${SPLUNK_HEC_TOKEN}"
+    endpoint: "${SPLUNK_HEC_URL}"
+    source: "otel"
+    sourcetype: "otel"
+    profiling_data_enabled: false
+  # Profiling
+  splunk_hec/profiling:
+    token: "${SPLUNK_ACCESS_TOKEN}"
+    endpoint: "${SPLUNK_INGEST_URL}/v1/log"
+    log_data_enabled: false
+
+service:
+  telemetry:
+    metrics:
+      address: "${SPLUNK_LISTEN_INTERFACE}:8888"
+  extensions: [health_check, http_forwarder, zpages]
+  pipelines:
+    traces:
+      receivers: [jaeger, otlp, sapm, zipkin]
+      processors:
+      - memory_limiter
+      - batch
+      #- resource/add_environment
+      exporters: [sapm]
+    metrics:
+      receivers: [otlp, signalfx]
+      processors: [memory_limiter, batch]
+      exporters: [signalfx]
+    metrics/internal:
+      receivers: [prometheus/internal]
+      processors: [memory_limiter, batch, resourcedetection/internal]
+      exporters: [signalfx/internal]
+    logs/signalfx:
+      receivers: [signalfx]
+      processors: [memory_limiter, batch]
+      exporters: [signalfx]
+    logs:
+      receivers: [otlp]
+      processors: [memory_limiter, batch]
+      exporters: [splunk_hec, splunk_hec/profiling]
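A sketch of running the image in gateway mode with the receiver ports this config listens on published to the host. Token, realm, and image tag are placeholders, and the memory limit is assumed to be derived by the binary as in the standard image:

```bash
docker run -d --name otelcol-fips-gateway \
  -e SPLUNK_ACCESS_TOKEN=REDACTED \
  -e SPLUNK_REALM=us0 \
  -e SPLUNK_LISTEN_INTERFACE=0.0.0.0 \
  -e SPLUNK_CONFIG=/etc/otel/collector/gateway_config.yaml \
  -p 4317:4317 -p 4318:4318 -p 7276:7276 -p 9943:9943 \
  -p 9411:9411 -p 14250:14250 -p 14268:14268 -p 13133:13133 \
  otelcol-fips:amd64
```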
diff --git a/cmd/otelcol/fips/config/otlp_config_linux.yaml b/cmd/otelcol/fips/config/otlp_config_linux.yaml
new file mode 100644
index 0000000000..d2e6cfebec
--- /dev/null
+++ b/cmd/otelcol/fips/config/otlp_config_linux.yaml
@@ -0,0 +1,123 @@
+# Configuration file that uses the OTLP exporters to push data to Splunk products.
+# Currently supports only tracing.
+
+receivers:
+  jaeger:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:14250
+      thrift_binary:
+        endpoint: 0.0.0.0:6832
+      thrift_compact:
+        endpoint: 0.0.0.0:6831
+      thrift_http:
+        endpoint: 0.0.0.0:14268
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  # This section is used to collect the OpenTelemetry Collector metrics
+  # Even if just a Splunk APM customer, these metrics are included
+  prometheus/internal:
+    config:
+      scrape_configs:
+      - job_name: 'otel-collector'
+        scrape_interval: 10s
+        static_configs:
+        - targets: ['0.0.0.0:8888']
+        metric_relabel_configs:
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_rpc_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_http_.*'
+            action: drop
+          - source_labels: [ __name__ ]
+            regex: 'otelcol_processor_batch_.*'
+            action: drop
+  sapm:
+    endpoint: 0.0.0.0:7276
+    # Whether to preserve incoming access token and use instead of exporter token
+    # default = false
+    #access_token_passthrough: true
+  signalfx:
+    endpoint: 0.0.0.0:9943
+  zipkin:
+    endpoint: 0.0.0.0:9411
+
+processors:
+  batch:
+  # Enabling the memory_limiter is strongly recommended for every pipeline.
+  # Configuration is based on the amount of memory allocated to the collector.
+  # For more information about memory limiter, see
+  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md
+  memory_limiter:
+    check_interval: 2s
+    limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}
+
+  # Optional: The following processor can be used to add a default "deployment.environment" attribute to the traces
+  # when it's not populated by instrumentation libraries.
+  # If enabled, make sure to enable this processor in the pipeline below.
+  #resource/add_environment:
+    #attributes:
+      #- action: insert
+        #value: staging/production/...
+        #key: deployment.environment
+
+  # Detect if the collector is running on a cloud system. Overrides resource attributes set by receivers.
+  # Detector order is important: the `system` detector goes last so it can't preclude cloud detectors from setting host/os info.
+  # https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/resourcedetectionprocessor#ordering
+  resourcedetection/internal:
+    detectors: [gcp, ecs, ec2, azure, system]
+    override: true
+
+exporters:
+  # Traces
+  otlphttp:
+    traces_endpoint: "https://ingest.${SPLUNK_REALM}.signalfx.com/v2/trace/otlp"
+    compression: gzip
+    headers:
+      "X-SF-Token": "${SPLUNK_ACCESS_TOKEN}"
+  # Metrics + Events
+  signalfx:
+    access_token: "${SPLUNK_ACCESS_TOKEN}"
+    realm: "${SPLUNK_REALM}"
+  # Debug
+  #debug:
+    #verbosity: detailed
+
+extensions:
+  health_check:
+    endpoint: 0.0.0.0:13133
+  http_forwarder:
+    ingress:
+      endpoint: 0.0.0.0:6060
+    egress:
+      endpoint: "https://api.${SPLUNK_REALM}.signalfx.com"
+  zpages:
+    endpoint: 0.0.0.0:55679
+
+service:
+  extensions: [health_check, http_forwarder, zpages]
+  pipelines:
+    traces:
+      receivers: [jaeger, otlp, zipkin]
+      processors:
+      - memory_limiter
+      - batch
+      #- resource/add_environment
+      exporters: [otlphttp, signalfx]
+    metrics:
+      receivers: [otlp, signalfx]
+      processors: [memory_limiter, batch]
+      exporters: [signalfx]
+    metrics/internal:
+      receivers: [prometheus/internal]
+      processors: [memory_limiter, batch, resourcedetection/internal]
+      exporters: [signalfx]
+    logs:
+      receivers: [signalfx]
+      processors: [memory_limiter, batch]
+      exporters: [signalfx]
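Once a container is running with any of these configs and port 4318 published, the OTLP/HTTP receiver can be exercised without real credentials; an empty payload is assumed to be accepted with a partial-success response rather than an error:

```bash
curl -s -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{"resourceSpans":[]}'
```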
diff --git a/internal/buildscripts/packaging/docker-otelcol.sh b/internal/buildscripts/packaging/docker-otelcol.sh
index 0303061f28..db197667a6 100755
--- a/internal/buildscripts/packaging/docker-otelcol.sh
+++ b/internal/buildscripts/packaging/docker-otelcol.sh
@@ -9,16 +9,27 @@ REPO_DIR="$( cd ${SCRIPT_DIR}/../../../ && pwd )"
 OTELCOL_DIR="${REPO_DIR}/cmd/otelcol"
 DIST_DIR="${OTELCOL_DIR}/dist"
 
+FIPS="${FIPS:-false}"
 PUSH="${PUSH:-false}"
 ARCH="${ARCH:-amd64}"
-IMAGE_NAME="${IMAGE_NAME:-otelcol}"
+if [ "$FIPS" != "true" ]; then
+    IMAGE_NAME="${IMAGE_NAME:-otelcol}"
+else
+    IMAGE_NAME="${IMAGE_NAME:-otelcol-fips}"
+fi
 IMAGE_TAG="${IMAGE_TAG:-latest}"
 SKIP_COMPILE="${SKIP_COMPILE:-false}"
 SKIP_BUNDLE="${SKIP_BUNDLE:-false}"
 DOCKER_REPO="${DOCKER_REPO:-docker.io}"
 JMX_METRIC_GATHERER_RELEASE="${JMX_METRIC_GATHERER_RELEASE:-}"
 MULTIARCH_OTELCOL_BUILDER="${MULTIARCH_OTELCOL_BUILDER:-}"
-DOCKER_OPTS="--provenance=false --build-arg DOCKER_REPO=${DOCKER_REPO} --build-arg JMX_METRIC_GATHERER_RELEASE=${JMX_METRIC_GATHERER_RELEASE} $OTELCOL_DIR"
+if [ "$FIPS" != "true" ]; then
+    DOCKER_OPTS="--provenance=false --build-arg DOCKER_REPO=${DOCKER_REPO} --build-arg JMX_METRIC_GATHERER_RELEASE=${JMX_METRIC_GATHERER_RELEASE} $OTELCOL_DIR"
+else
+    OTELCOL_DIR="${OTELCOL_DIR}/fips"
+    DIST_DIR="${OTELCOL_DIR}/dist"
+    DOCKER_OPTS="--provenance=false --build-arg DOCKER_REPO=${DOCKER_REPO} $OTELCOL_DIR"
+fi
 
 CONTAINERD_ENABLED="false"
 if docker info -f '{{ .DriverStatus }}' | grep -q "io.containerd.snapshotter"; then
@@ -31,7 +42,12 @@ archs=$(echo $ARCH | tr "," " ")
 
 platforms=""
 for arch in $archs; do
-    if [[ ! "$arch" =~ ^amd64|arm64|ppc64le$ ]]; then
+    if [ "$FIPS" != "true" ]; then
+        if [[ ! "$arch" =~ ^amd64|arm64|ppc64le$ ]]; then
+            echo "$arch not supported!" >&2
+            exit 1
+        fi
+    elif [[ ! "$arch" =~ ^amd64|arm64$ ]]; then
         echo "$arch not supported!" >&2
         exit 1
     fi
@@ -41,39 +57,53 @@ if [ -d "$DIST_DIR" ]; then
     rm -rf "$DIST_DIR"
 fi
 mkdir -p "$DIST_DIR"
-cp "${SCRIPT_DIR}/collect-libs.sh" "$DIST_DIR"
+
+if [ "$FIPS" != "true" ]; then
+    cp "${SCRIPT_DIR}/collect-libs.sh" "$DIST_DIR"
+fi
 
 for arch in $archs; do
     if [ "$SKIP_COMPILE" != "true" ]; then
-        make -C "$REPO_DIR" binaries-linux_${arch}
+        if [ "$FIPS" != "true" ]; then
+            make -C "$REPO_DIR" binaries-linux_${arch}
+        else
+            GOOS=linux GOARCH=$arch make -C "$REPO_DIR" otelcol-fips
+        fi
     fi
-    for bin in otelcol_linux_${arch} migratecheckpoint_linux_${arch}; do
+    if [ "$FIPS" != "true" ]; then
+        bins="otelcol_linux_${arch} migratecheckpoint_linux_${arch}"
+    else
+        bins="otelcol-fips_linux_${arch}"
+    fi
+    for bin in $bins; do
         if [ ! -f "${REPO_DIR}/bin/${bin}" ]; then
             echo "${REPO_DIR}/bin/${bin} not found!" >&2
             exit 1
         fi
        cp "${REPO_DIR}/bin/${bin}" "$DIST_DIR"
     done
-    if [[ "$arch" =~ ^amd64|arm64$ ]]; then
-        if [ "$SKIP_BUNDLE" != "true" ]; then
-            make -C "${REPO_DIR}/internal/signalfx-agent/bundle" agent-bundle-linux ARCH=${arch} DOCKER_REPO=${DOCKER_REPO}
+    if [ "$FIPS" != "true" ]; then
+        if [[ "$arch" =~ ^amd64|arm64$ ]]; then
+            if [ "$SKIP_BUNDLE" != "true" ]; then
+                make -C "${REPO_DIR}/internal/signalfx-agent/bundle" agent-bundle-linux ARCH=${arch} DOCKER_REPO=${DOCKER_REPO}
+            fi
+        else
+            # create a dummy file to copy for the docker build
+            touch "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz"
         fi
-    else
-        # create a dummy file to copy for the docker build
-        touch "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz"
-    fi
-    if [ ! -f "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz" ]; then
-        echo "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz not found!" >&2
-        exit 1
+        if [ ! -f "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz" ]; then
+            echo "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz not found!" >&2
+            exit 1
+        fi
+        cp "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz" "$DIST_DIR"
     fi
-    cp "${REPO_DIR}/dist/agent-bundle_linux_${arch}.tar.gz" "$DIST_DIR"
     if [[ "$PUSH" = "true" || "$CONTAINERD_ENABLED" = "true" ]]; then
         platforms="${platforms},linux/${arch}"
     else
         # multiarch images must be built and tagged individually when not pushing or not using the containerd image store
         # https://github.com/docker/buildx/issues/59
-        docker buildx build --platform linux/${arch} --tag otelcol:${arch} --load $DOCKER_OPTS
-        docker tag otelcol:${arch} ${IMAGE_NAME}:${IMAGE_TAG}
+        docker buildx build --platform linux/${arch} --tag ${IMAGE_NAME}:${arch} --load $DOCKER_OPTS
+        docker tag ${IMAGE_NAME}:${arch} ${IMAGE_NAME}:${IMAGE_TAG}
     fi
 done
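For reference, the FIPS path through this script is what the CI job above invokes. A sketch of the local equivalents (make targets as used in the workflow; the plain `otelcol-fips` target is the one the script itself calls when compilation is not skipped):

```bash
# Build the FIPS binary and image for one arch.
FIPS=true ARCH=amd64 make docker-otelcol

# Reuse an existing bin/otelcol-fips_linux_amd64, as CI does after downloading it.
FIPS=true ARCH=amd64 SKIP_COMPILE=true make docker-otelcol

# amd64 + arm64 in one invocation; images are tagged per arch unless PUSH=true
# or the containerd image store is enabled.
FIPS=true ARCH=amd64,arm64 make docker-otelcol
```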