-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.yaml
469 lines (449 loc) · 15 KB
/
docker-compose.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
# Named volumes. `certs` is populated by the `setup` service and shared
# read-only (or rw where noted) with every TLS-enabled service.
volumes:
  certs:
    driver: local
  esdata01:
    driver: local
  kibanadata:
    driver: local
  # NOTE(review): no metricbeat service is visible in this file — presumably
  # used by a commented-out or external service; verify before removing.
  metricbeatdata01:
    driver: local
  fleetserverdata:
    driver: local
# Three-tier network segmentation: web -> app -> data.
networks:
  web_net: # Web network (exposed to external users)
    driver: bridge
  app_net: # Application network (not exposed to users)
    driver: bridge
  data_net: # Data network (no internet access)
    driver: bridge
    internal: true
services:
  # One-shot container that runs the SQL migrations against Postgres.
  init:
    build: ./init
    restart: "no"
    environment:
      DB_USER: ${DB_USER}
      DB_PASS: ${DB_PASS}
      DB_NAME: ${DB_NAME}
    depends_on:
      # Fixed: the previous short-form depends_on only ordered startup; the
      # migrations would race Postgres initialization and fail. Wait until
      # certs exist (setup healthy) and the database accepts connections.
      setup:
        condition: service_healthy
      db:
        condition: service_healthy
    volumes:
      - certs:/certs:rw
      - ./database/migrations:/migrations:ro
    networks:
      - app_net
      - data_net
db:
image: bitnami/postgresql:15
restart: always
environment:
POSTGRESQL_USERNAME: ${DB_USER}
POSTGRESQL_PASSWORD: ${DB_PASS}
POSTGRESQL_DATABASE: ${DB_NAME}
POSTGRESQL_TLS_CERT_FILE: /certs/postgres/postgres.crt
POSTGRESQL_TLS_KEY_FILE: /certs/postgres/postgres.key
POSTGRESQL_TLS_CA_FILE: /certs/ca/ca.crt
POSTGRESQL_ENABLE_TLS: "yes"
volumes:
- ./db_data:/var/lib/postgresql/data
- certs:/certs:rw
networks:
- data_net
ports:
- "5432:5432"
healthcheck:
test: ["CMD", "pg_isready", "-U", "${DB_USER}"]
interval: 10s
timeout: 5s
retries: 5
labels:
logging: "enabled"
depends_on:
setup:
condition: service_healthy
backend:
build: ./backend
restart: always
environment:
PORT: 3000
DB_USER: ${DB_USER}
DB_PASS: ${DB_PASS}
DB_HOST: db
DB_PORT: 5432
DB_NAME: ${DB_NAME}
PGSSLCERT: /app/certs/cubos/cubos.crt
PGSSLKEY: /app/certs/cubos/cubos.key
PGSSLROOTCERT: /app/certs/ca/ca.crt
OTEL_TRACES_EXPORTER: otlp
OTEL_METRICS_EXPORTER: otlp
OTEL_LOGS_EXPORTER: otlp
OTEL_EXPORTER_OTLP_PROTOCOL: grpc
OTEL_EXPORTER_OTLP_ENDPOINT: ${OTEL_COLLECTOR_URL}
OTEL_EXPORTER_OTLP_HEADERS: "Authorization=Bearer ${OTEL_TOKEN}"
OTEL_RESOURCE_ATTRIBUTES: "service.name=cubos-backend,service.version=1.0.0,deployment.environment=production"
OTEL_NODE_RESOURCE_DETECTORS: "env,host,os"
NODE_EXTRA_CA_CERTS: /app/certs/ca/ca.crt
OTEL_SEMCONV_STABILITY_OPT_IN: http
OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: base2_exponential_bucket_histogram
OTEL_LOG_LEVEL: debug
# OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION: explicit_bucket_histogram
depends_on:
db:
condition: service_healthy
init:
condition: service_started
volumes:
- certs:/app/certs:ro
networks:
- app_net
- data_net
nginx:
build:
context: ./nginx
restart: always
ports:
- "80:80"
- "443:443"
depends_on:
- setup
- backend
volumes:
- certs:/etc/nginx/certs:ro
networks:
- web_net
- app_net
postgres-exporter:
image: bitnami/postgres-exporter:latest
environment:
DATA_SOURCE_NAME: postgresql://${DB_USER}:${DB_PASS}@db:5432/${DB_NAME}
PGSSLMODE: verify-full
PGSSLROOTCERT: /certs/ca/ca.crt
PGSSLCERT: /certs/cubos/cubos.crt
PGSSLKEY: /certs/cubos/cubos.key
volumes:
- certs:/certs:ro
# depends_on:
# init:
# condition: service_started
depends_on:
setup:
condition: service_healthy
db:
condition: service_healthy
networks:
- app_net
- data_net
command:
- '--no-collector.wal' # Correct flag to disable WAL collector
labels:
logging: "enabled"
otel-collector:
image: otel/opentelemetry-collector-contrib:0.113.0
container_name: otel-collector
restart: unless-stopped
ports:
- "4317:4317" # gRPC
- "4318:4318" # HTTP
environment:
OTEL_TOKEN: ${OTEL_TOKEN}
ELASTIC_APM_URL: ${ELASTIC_APM_URL}
ELASTIC_APM_TOKEN: ${ELASTIC_APM_TOKEN}
NEWRELIC_API_KEY: ${NEWRELIC_API_KEY}
OTEL_LGTM_URL: ${OTEL_LGTM_URL}
NEWRELIC_URL: ${NEWRELIC_URL}
volumes:
- ./otel-collector/config.yaml:/etc/otel/config.yaml:ro
- certs:/certs:ro
command:
--config /etc/otel/config.yaml
depends_on:
- setup
# - init
- db
networks:
- app_net
otel-lgtm:
image: grafana/otel-lgtm:latest
ports:
- "3000:3000"
# - "4317:4317"
- "9090:9090"
networks:
- app_net
- data_net
volumes:
- ./otel-lgtm/.data/grafana/data:/otel-lgtm/grafana/data
- ./otel-lgtm/.data/prometheus:/data/prometheus
- ./otel-lgtm/.data/loki:/loki
- ./otel-lgtm/grafana-datasources.yaml/:/otel-lgtm/grafana/conf/provisioning/datasources/grafana-datasources.yaml
- ./otel-lgtm/grafana-dashboards.yaml/:/otel-lgtm/grafana/conf/provisioning/dashboards/grafana-dashboards.yaml
# - ./otel-lgtm/grafana.ini:/otel-lgtm/grafana/conf/custom.ini
- ./otel-lgtm/dashboards-json:/otel-lgtm/dashboards-json
- ./otel-lgtm/loki-config.yaml:/otel-lgtm/loki-config.yaml
- ./otel-lgtm/otelcol-config.yaml:/otel-lgtm/otelcol-config.yaml
- ./otel-lgtm/prometheus.yaml:/otel-lgtm/prometheus.yaml
- ./otel-lgtm/tempo-config.yaml:/otel-lgtm/tempo-config.yaml
environment:
# GF_AUTH_ANONYMOUS_ENABLED: false
# GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
ENABLE_LOGS_GRAFANA: true
ENABLE_LOGS_LOKI: true
ENABLE_LOGS_OTELCOL: true
ENABLE_LOGS_TEMPO: true
es01:
depends_on:
setup:
condition: service_healthy
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
labels:
co.elastic.logs/module: elasticsearch
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata01:/usr/share/elasticsearch/data
ports:
- ${ES_PORT}:9200
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- discovery.type=single-node
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
- cluster.routing.allocation.disk.watermark.low=20mb
- cluster.routing.allocation.disk.watermark.high=15mb
- cluster.routing.allocation.disk.watermark.flood_stage=10mb
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
mem_limit: ${ES_MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
networks:
- data_net
kibana:
depends_on:
es01:
condition: service_healthy
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
labels:
co.elastic.logs/module: kibana
volumes:
- certs:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
- ./elk/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
# - kibana_config:/usr/share/kibana/config
ports:
- ${KIBANA_PORT}:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=/usr/share/kibana/config/certs/ca/ca.crt
- XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY}
- XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY}
- XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY}
- XPACK_REPORTING_KIBANASERVER_HOSTNAME=localhost
- SERVER_SSL_ENABLED=true
- SERVER_SSL_CERTIFICATE=config/certs/kibana/kibana.crt
- SERVER_SSL_KEY=config/certs/kibana/kibana.key
- SERVER_SSL_CERTIFICATEAUTHORITIES=/usr/share/kibana/config/certs/ca/ca.crt
- ELASTIC_APM_SECRET_TOKEN=${ELASTIC_APM_SECRET_TOKEN}
- KIBANA_FLEET_CA=/usr/share/kibana/config/certs/ca/ca.crt
mem_limit: ${KB_MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -I -s --cacert config/certs/ca/ca.crt https://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
networks:
- app_net
- data_net
  # Bootstrap container (based on the Elastic reference compose file):
  # generates the CA and all per-service TLS certificates, fixes permissions,
  # patches kibana.yml with the CA fingerprint, waits for Elasticsearch and
  # sets the kibana_system password. Reports healthy once es01's cert exists,
  # then idles (tail -f) so depends_on health conditions keep working.
  setup:
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
      # Mounted so the sed below can edit kibana.yml in place.
      - ./elk:/usr/share/kibana/config
    user: "0"
    command: >
      bash -c '
        if [ x${ELASTIC_PASSWORD} == x ]; then
          echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
          exit 1;
        elif [ x${KIBANA_PASSWORD} == x ]; then
          echo "Set the KIBANA_PASSWORD environment variable in the .env file";
          exit 1;
        fi;
        if [ ! -f config/certs/ca.zip ]; then
          echo "Creating CA";
          bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
          unzip config/certs/ca.zip -d config/certs;
        fi;
        if [ ! -f config/certs/certs.zip ]; then
          echo "Creating certs";
          echo -ne \
          "instances:\n"\
          "  - name: es01\n"\
          "    dns:\n"\
          "      - es01\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: kibana\n"\
          "    dns:\n"\
          "      - kibana\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: fleet-server\n"\
          "    dns:\n"\
          "      - fleet-server\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: elastic-agent-apm\n"\
          "    dns:\n"\
          "      - elastic-agent-apm\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: otel-collector\n"\
          "    dns:\n"\
          "      - otel-collector\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: nginx\n"\
          "    dns:\n"\
          "      - nginx\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: postgres\n"\
          "    dns:\n"\
          "      - postgres\n"\
          "      - db\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: cubos\n"\
          "    dns:\n"\
          "      - cubos\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          > config/certs/instances.yml;
          bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
          unzip config/certs/certs.zip -d config/certs;
        fi;
        echo "Setting file permissions"
        chown -R root:root config/certs;
        find . -type d -exec chmod 750 \{\} \;;
        find . -type f -exec chmod 640 \{\} \;;
        echo "Updating ca_trusted_fingerprint in kibana.yml"
        sed -i -e "/^\s*xpack\.fleet\.outputs:/,/^\s*xpack\./{
          /^\s*-\s*id:\s*elasticsearch\s*$/,/^\s*-\s*id:/{
            s/^\(\s*ca_trusted_fingerprint:\s*\).*/\1\"$(openssl x509 -fingerprint -sha256 -noout -in config/certs/ca/ca.crt | sed "s/.*=//;s/://g")\"/
          }
        }" /usr/share/kibana/config/kibana.yml;
        echo "Waiting for Elasticsearch availability";
        until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
        echo "Setting kibana_system password";
        until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
        echo "Extracting CA certificate SHA256 fingerprint"
        echo "Updating xpack.fleet.agents.elasticsearch.ca_sha256 in kibana.yml"
        CA_FINGERPRINT=$(openssl x509 -fingerprint -sha256 -noout -in config/certs/ca/ca.crt | sed "s/.*=//;s/://g")
        echo "All done";
        tail -f /dev/null
      '
    healthcheck:
      # Healthy once the es01 certificate has been generated.
      test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
      interval: 7s
      timeout: 5s
      retries: 120
    networks:
      - data_net
      - app_net
  # Elastic Agent running as Fleet Server (and APM intake on 8200). Enrolls
  # against Kibana once both Kibana and Elasticsearch are healthy.
  fleet-server:
    depends_on:
      kibana:
        condition: service_healthy
      es01:
        condition: service_healthy
    image: docker.elastic.co/beats/elastic-agent:${STACK_VERSION}
    volumes:
      - certs:/certs
      - fleetserverdata:/usr/share/elastic-agent
      # Host mounts below give the agent access to Docker container logs,
      # the Docker socket and host metrics (cgroup/proc/rootfs).
      - "/var/lib/docker/containers:/var/lib/docker/containers:ro"
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
      - "/sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro"
      - "/proc:/hostfs/proc:ro"
      - "/:/hostfs:ro"
    ports:
      - ${FLEET_PORT}:8220
      - ${APMSERVER_PORT}:8200
    user: root
    environment:
      - SSL_CERTIFICATE_AUTHORITIES=/certs/ca/ca.crt
      - CERTIFICATE_AUTHORITIES=/certs/ca/ca.crt
      - FLEET_CA=/certs/ca/ca.crt
      - FLEET_ENROLL=1
      # NOTE(review): the *_INSECURE flags below disable TLS verification
      # even though CA and server certificates are configured above —
      # presumably a workaround from initial setup; confirm whether they can
      # be removed now that the CA chain is in place.
      - FLEET_INSECURE=true
      - FLEET_SERVER_ELASTICSEARCH_CA=/certs/ca/ca.crt
      - FLEET_SERVER_ELASTICSEARCH_HOST=https://es01:9200
      - FLEET_SERVER_ELASTICSEARCH_INSECURE=true
      - FLEET_SERVER_ENABLE=1
      - FLEET_SERVER_CERT=/certs/fleet-server/fleet-server.crt
      - FLEET_SERVER_CERT_KEY=/certs/fleet-server/fleet-server.key
      - FLEET_SERVER_INSECURE_HTTP=true
      - FLEET_SERVER_POLICY_ID=fleet-server-policy
      - FLEET_URL=https://fleet-server:8220
      - KIBANA_FLEET_CA=/certs/ca/ca.crt
      - KIBANA_FLEET_SETUP=1
      - KIBANA_FLEET_USERNAME=elastic
      - KIBANA_FLEET_PASSWORD=${ELASTIC_PASSWORD}
      - KIBANA_HOST=https://kibana:5601
    networks:
      - app_net
      - data_net
# filebeat:
# image: docker.elastic.co/beats/filebeat:${STACK_VERSION}
# volumes:
# - ./elk/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
# - /var/lib/docker/containers:/var/lib/docker/containers:ro
# - /var/run/docker.sock:/var/run/docker.sock:ro
# - certs:/etc/filebeat/certs:ro
# - ./elk/filebeat/logs:/var/log/filebeat # Mount log directory
# networks:
# - data_net
# - app_net
# environment:
# ELASTICSEARCH_URL: ${ELASTICSEARCH_URL}
# ELASTICSEARCH_FILEBEAT_APIKEY: ${ELASTICSEARCH_FILEBEAT_APIKEY}
# depends_on:
# setup:
# condition: service_healthy