fips: otelcol images for linux and windows
jeffreyc-splunk committed Sep 19, 2024
1 parent 4ea0804 commit cbac824
Showing 9 changed files with 980 additions and 19 deletions.
90 changes: 90 additions & 0 deletions .github/workflows/otelcol-fips.yml
@@ -79,3 +79,93 @@ jobs:
if: matrix.FIPSMODE == '0' && steps.run-otelcol.outcome == 'failure'
- run: throw "FIPS disabled, should have failed"
if: matrix.FIPSMODE == '0' && steps.run-otelcol.outcome == 'success'

docker-otelcol-fips:
runs-on: ${{ fromJSON('["ubuntu-20.04", "otel-arm64"]')[matrix.ARCH == 'arm64'] }}
needs: [ otelcol-fips ]
strategy:
matrix:
ARCH: [ amd64, arm64 ]
fail-fast: false
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache-dependency-path: '**/go.sum'
- uses: actions/download-artifact@v4
with:
name: otelcol-fips-linux-${{ matrix.ARCH }}
path: ./bin
- run: make docker-otelcol SKIP_COMPILE=true
env:
FIPS: true
ARCH: ${{ matrix.ARCH }}
- name: Ensure the collector container can run with the default config
run: |
docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm otelcol-fips:${{ matrix.ARCH }}
sleep 30
docker logs otelcol-fips
if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
exit 1
fi
docker rm -f otelcol-fips
- name: Ensure the collector container can run with all included configs
run: |
for config in cmd/otelcol/fips/config/*.yaml; do
docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm -e SPLUNK_CONFIG=/etc/otel/collector/$(basename $config) otelcol-fips:${{ matrix.ARCH }}
sleep 30
docker logs otelcol-fips
if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
exit 1
fi
docker rm -f otelcol-fips
done
- run: docker save -o image.tar otelcol-fips:${{ matrix.ARCH }}
- uses: actions/upload-artifact@v4
with:
name: docker-otelcol-fips-${{ matrix.ARCH }}
path: ./image.tar

win-docker-otelcol-fips:
runs-on: windows-${{ matrix.WIN_VERSION }}
needs: [ otelcol-fips ]
strategy:
matrix:
WIN_VERSION: [ 2019, 2022 ]
fail-fast: false
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/download-artifact@v4
with:
name: otelcol-fips-windows-amd64
path: ./cmd/otelcol/fips/dist
- run: docker build --pull -t otelcol-fips:${{ matrix.WIN_VERSION }} --build-arg BASE_IMAGE=${env:BASE_IMAGE} -f .\cmd\otelcol\fips\Dockerfile.windows .\cmd\otelcol\fips
env:
BASE_IMAGE: mcr.microsoft.com/windows/servercore:ltsc${{ matrix.WIN_VERSION }}
- name: Ensure the collector container can run with the default config
shell: bash
run: |
docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm otelcol-fips:${{ matrix.WIN_VERSION }}
sleep 30
docker logs otelcol-fips
if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
exit 1
fi
docker rm -f otelcol-fips
- name: Ensure the collector container can run with all included configs
shell: bash
run: |
for config in cmd/otelcol/fips/config/*.yaml; do
docker run -d --name otelcol-fips -e SPLUNK_ACCESS_TOKEN=fake-token -e SPLUNK_REALM=fake-realm -e SPLUNK_CONFIG="C:\\ProgramData\\Splunk\\OpenTelemetry Collector\\$(basename $config)" otelcol-fips:${{ matrix.WIN_VERSION }}
sleep 30
docker logs otelcol-fips
if [ -z "$( docker ps --filter=status=running --filter=name=otelcol-fips -q )" ]; then
exit 1
fi
docker rm -f otelcol-fips
done
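
For reference, the Linux job above can be reproduced locally with roughly the following shell commands, assuming the FIPS collector binary from the otelcol-fips job has already been placed under ./bin (amd64 shown; the variables mirror the workflow step's environment, and the tag follows the matrix ARCH value):

# Build the FIPS image from the pre-compiled binary, then smoke-test it with the default config.
FIPS=true ARCH=amd64 make docker-otelcol SKIP_COMPILE=true
docker run -d --name otelcol-fips \
  -e SPLUNK_ACCESS_TOKEN=fake-token \
  -e SPLUNK_REALM=fake-realm \
  otelcol-fips:amd64
sleep 30
docker logs otelcol-fips
docker rm -f otelcol-fips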
30 changes: 30 additions & 0 deletions cmd/otelcol/fips/Dockerfile
@@ -0,0 +1,30 @@
ARG DOCKER_REPO=docker.io

FROM ${DOCKER_REPO}/alpine:3.17.0 AS certs
RUN apk --update add ca-certificates

FROM ${DOCKER_REPO}/alpine:3.17.0 AS otelcol
ARG TARGETARCH
COPY --chmod=755 dist/otelcol-fips_linux_${TARGETARCH} /otelcol
RUN echo "splunk-otel-collector:x:999:999::/:" > /etc_passwd
# create base dirs since we cannot chown in scratch image except via COPY
RUN mkdir -p /otel/collector /splunk-otel-collector

FROM scratch

COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt

COPY --from=otelcol /etc_passwd /etc/passwd
COPY --from=otelcol --chown=999 /otelcol /
COPY --from=otelcol --chown=999 /otel /etc/otel
COPY --from=otelcol --chown=999 /otel/collector /etc/otel/collector

COPY --chown=999 config/gateway_config.yaml /etc/otel/collector/gateway_config.yaml
COPY --chown=999 config/otlp_config_linux.yaml /etc/otel/collector/otlp_config_linux.yaml
COPY --chown=999 config/agent_config.yaml /etc/otel/collector/agent_config.yaml
COPY --chown=999 config/fargate_config.yaml /etc/otel/collector/fargate_config.yaml
COPY --chown=999 config/ecs_ec2_config.yaml /etc/otel/collector/ecs_ec2_config.yaml

USER splunk-otel-collector
ENTRYPOINT ["/otelcol"]
EXPOSE 13133 14250 14268 4317 4318 6060 8006 8888 9411 9443 9080
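
As a rough sketch, the image can also be built straight from this Dockerfile (bypassing the make target), assuming the FIPS binary has already been staged at cmd/otelcol/fips/dist/otelcol-fips_linux_<arch>; the tag and platform below are illustrative:

# buildx sets TARGETARCH from --platform, which selects the matching binary in dist/.
docker buildx build --platform linux/amd64 \
  --build-arg DOCKER_REPO=docker.io \
  -t otelcol-fips:amd64 \
  -f cmd/otelcol/fips/Dockerfile cmd/otelcol/fips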
28 changes: 28 additions & 0 deletions cmd/otelcol/fips/Dockerfile.windows
@@ -0,0 +1,28 @@
ARG BASE_IMAGE

FROM ${BASE_IMAGE}

# Set PowerShell as the default shell.
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

# Copy the pre-built local binary
WORKDIR "C:\Program Files\Splunk\OpenTelemetry Collector"
COPY dist/otelcol-fips_windows_amd64.exe ./otelcol.exe

# Copy the local config
WORKDIR "C:\ProgramData\Splunk\OpenTelemetry Collector"
COPY config/gateway_config.yaml ./
COPY config/otlp_config_linux.yaml ./
COPY config/agent_config.yaml ./
COPY config/fargate_config.yaml ./
COPY config/ecs_ec2_config.yaml ./

# Enable FIPS
RUN Set-ItemProperty -Path HKLM:\System\CurrentControlSet\Control\Lsa\FipsAlgorithmPolicy -Name Enabled -Value 1

WORKDIR "C:\Program Files\Splunk\OpenTelemetry Collector"

ENV SPLUNK_CONFIG="C:\ProgramData\Splunk\OpenTelemetry Collector\gateway_config.yaml"

ENTRYPOINT [ "otelcol.exe" ]
EXPOSE 13133 14250 14268 4317 6060 8888 9411 9443 9080
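
For reference, the workflow builds this image with a command along the following lines (Server 2022 shown, run from a bash shell on the Windows runner); the FIPS registry flag baked in above can then be spot-checked by overriding the entrypoint, which is an illustrative extra step, not part of the workflow:

docker build --pull -t otelcol-fips:2022 \
  --build-arg BASE_IMAGE=mcr.microsoft.com/windows/servercore:ltsc2022 \
  -f ./cmd/otelcol/fips/Dockerfile.windows ./cmd/otelcol/fips
# Optional check that FipsAlgorithmPolicy\Enabled is 1 inside the image:
docker run --rm --entrypoint powershell otelcol-fips:2022 \
  -Command "Get-ItemProperty -Path HKLM:\System\CurrentControlSet\Control\Lsa\FipsAlgorithmPolicy -Name Enabled"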
205 changes: 205 additions & 0 deletions cmd/otelcol/fips/config/agent_config.yaml
@@ -0,0 +1,205 @@
# Default configuration file for the Linux (deb/rpm) and Windows MSI collector packages

# If the collector is installed without the Linux/Windows installer script, the following
# environment variables must be defined manually or configured below:
# - SPLUNK_ACCESS_TOKEN: The Splunk access token to authenticate requests
# - SPLUNK_API_URL: The Splunk API URL, e.g. https://api.us0.signalfx.com
# - SPLUNK_HEC_TOKEN: The Splunk HEC authentication token
# - SPLUNK_HEC_URL: The Splunk HEC endpoint URL, e.g. https://ingest.us0.signalfx.com/v1/log
# - SPLUNK_INGEST_URL: The Splunk ingest URL, e.g. https://ingest.us0.signalfx.com
# - SPLUNK_LISTEN_INTERFACE: The network interface the agent receivers listen on.
# - SPLUNK_TRACE_URL: The Splunk trace endpoint URL, e.g. https://ingest.us0.signalfx.com/v2/trace

extensions:
health_check:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:13133"
http_forwarder:
ingress:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:6060"
egress:
endpoint: "${SPLUNK_API_URL}"
# Use instead when sending to gateway
#endpoint: "${SPLUNK_GATEWAY_URL}"
zpages:
#endpoint: "${SPLUNK_LISTEN_INTERFACE}:55679"

receivers:
fluentforward:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:8006"
hostmetrics:
collection_interval: 10s
scrapers:
cpu:
disk:
filesystem:
memory:
network:
# System load average metrics https://en.wikipedia.org/wiki/Load_(computing)
load:
# Paging/Swap space utilization and I/O metrics
paging:
# Aggregated system process count metrics
processes:
# System processes metrics, disabled by default
# process:
jaeger:
protocols:
grpc:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:14250"
thrift_binary:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:6832"
thrift_compact:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:6831"
thrift_http:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:14268"
otlp:
protocols:
grpc:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:4317"
http:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:4318"
# This section collects the OpenTelemetry Collector's own internal metrics.
# These metrics are included even for Splunk APM-only customers.
prometheus/internal:
config:
scrape_configs:
- job_name: 'otel-collector'
scrape_interval: 10s
static_configs:
- targets: ["${SPLUNK_LISTEN_INTERFACE}:8888"]
metric_relabel_configs:
- source_labels: [ __name__ ]
regex: 'otelcol_rpc_.*'
action: drop
- source_labels: [ __name__ ]
regex: 'otelcol_http_.*'
action: drop
- source_labels: [ __name__ ]
regex: 'otelcol_processor_batch_.*'
action: drop
signalfx:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:9943"
# Whether to preserve the incoming access token and use it instead of the exporter token
# default = false
#access_token_passthrough: true
zipkin:
endpoint: "${SPLUNK_LISTEN_INTERFACE}:9411"
nop:

processors:
batch:
# Enabling the memory_limiter is strongly recommended for every pipeline.
# Configuration is based on the amount of memory allocated to the collector.
# For more information about memory limiter, see
# https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiter/README.md
memory_limiter:
check_interval: 2s
limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB}

# Detect if the collector is running on a cloud system, which is important for creating unique cloud provider dimensions.
# Detector order is important: the `system` detector goes last so it can't preclude cloud detectors from setting host/os info.
# Resource detection processor is configured to override all host and cloud attributes because instrumentation
# libraries can send wrong values from container environments.
# https://docs.splunk.com/Observability/gdi/opentelemetry/components/resourcedetection-processor.html#ordering-considerations
resourcedetection:
detectors: [gcp, ecs, ec2, azure, system]
override: true

# Optional: The following processor can be used to add a default "deployment.environment" attribute to the logs and
# traces when it's not populated by instrumentation libraries.
# If enabled, make sure to enable this processor in a pipeline.
# For more information, see https://docs.splunk.com/Observability/gdi/opentelemetry/components/resource-processor.html
#resource/add_environment:
#attributes:
#- action: insert
#value: staging/production/...
#key: deployment.environment

exporters:
# Traces
sapm:
access_token: "${SPLUNK_ACCESS_TOKEN}"
endpoint: "${SPLUNK_TRACE_URL}"
# Metrics + Events
signalfx:
access_token: "${SPLUNK_ACCESS_TOKEN}"
api_url: "${SPLUNK_API_URL}"
ingest_url: "${SPLUNK_INGEST_URL}"
# Use instead when sending to gateway
#api_url: http://${SPLUNK_GATEWAY_URL}:6060
#ingest_url: http://${SPLUNK_GATEWAY_URL}:9943
sync_host_metadata: true
correlation:
# Entities (applicable only if discovery mode is enabled)
otlphttp/entities:
logs_endpoint: "${SPLUNK_INGEST_URL}/v3/event"
headers:
"X-SF-Token": "${SPLUNK_ACCESS_TOKEN}"
# Logs
splunk_hec:
token: "${SPLUNK_HEC_TOKEN}"
endpoint: "${SPLUNK_HEC_URL}"
source: "otel"
sourcetype: "otel"
profiling_data_enabled: false
# Profiling
splunk_hec/profiling:
token: "${SPLUNK_ACCESS_TOKEN}"
endpoint: "${SPLUNK_INGEST_URL}/v1/log"
log_data_enabled: false
# Send to gateway
otlp:
endpoint: "${SPLUNK_GATEWAY_URL}:4317"
tls:
insecure: true
# Debug
debug:
verbosity: detailed

service:
telemetry:
metrics:
address: "${SPLUNK_LISTEN_INTERFACE}:8888"
extensions: [health_check, http_forwarder, zpages]
pipelines:
traces:
receivers: [jaeger, otlp, zipkin]
processors:
- memory_limiter
- batch
- resourcedetection
#- resource/add_environment
exporters: [sapm, signalfx]
# Use instead when sending to gateway
#exporters: [otlp, signalfx]
metrics:
receivers: [hostmetrics, otlp, signalfx]
processors: [memory_limiter, batch, resourcedetection]
exporters: [signalfx]
# Use instead when sending to gateway
#exporters: [otlp]
metrics/internal:
receivers: [prometheus/internal]
processors: [memory_limiter, batch, resourcedetection]
# When sending to gateway, at least one metrics pipeline needs
# to use signalfx exporter so host metadata gets emitted
exporters: [signalfx]
logs/signalfx:
receivers: [signalfx]
processors: [memory_limiter, batch, resourcedetection]
exporters: [signalfx]
logs/entities:
# Receivers are dynamically added if discovery mode is enabled
receivers: [nop]
processors: [memory_limiter, batch, resourcedetection]
exporters: [otlphttp/entities]
logs:
receivers: [fluentforward, otlp]
processors:
- memory_limiter
- batch
- resourcedetection
#- resource/add_environment
exporters: [splunk_hec, splunk_hec/profiling]
# Use instead when sending to gateway
#exporters: [otlp]
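
Note: the container checks in the workflow exercise this file through the SPLUNK_CONFIG environment variable. A minimal sketch of running the Linux FIPS image against this agent config, using the same fake placeholders as CI (the image tag is illustrative):

docker run -d --name otelcol-fips \
  -e SPLUNK_ACCESS_TOKEN=fake-token \
  -e SPLUNK_REALM=fake-realm \
  -e SPLUNK_CONFIG=/etc/otel/collector/agent_config.yaml \
  otelcol-fips:amd64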