Commit 746d7fb: Merge pull request #311
Impl. helmfile modifications for local development and testing
(2 parents: e2bf5e8 + 80fee3b)

13 files changed: +458 / -2 lines
.github/ci_config/k3d-config.yaml

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@

```yaml
# See:
# - https://github.com/k3d-io/k3d/issues/19#issuecomment-1967513596
# - https://github.com/ligfx/k3d-registry-dockerd
apiVersion: k3d.io/v1alpha5
kind: Simple
volumes:
  - volume: $HOME/k3d-containerd:/var/lib/rancher/k3s/agent/containerd/
    nodeFilters:
      - server:0
registries:
  create:
    image: ligfx/k3d-registry-dockerd:v0.5
    proxy:
      remoteURL: "*"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
options:
  k3s:
    extraArgs:
      - arg: --disable=helm-controller
        nodeFilters:
          - server:*
      - arg: --disable=traefik
        nodeFilters:
          - server:*
# Same as CLI parameter `--port '80:80@loadbalancer'`
#ports:
#  - port: 80:80
#    nodeFilters:
#      - loadbalancer
```
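As a quick sanity check (a sketch; the cluster name `smoke-test` is arbitrary), the file can be exercised on its own:

```shell
# Create a throwaway cluster from this config, confirm the node is Ready, tear down.
k3d cluster create smoke-test --config .github/ci_config/k3d-config.yaml
kubectl --context k3d-smoke-test get nodes
k3d cluster delete smoke-test
```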

README.md

Lines changed: 38 additions & 0 deletions
@@ -35,6 +35,7 @@ The Kubernetes stack of RADAR-base platform.
```diff
 - [Volume expansion](#volume-expansion)
 - [Uninstall](#uninstall)
 - [Update charts](#update-charts)
+- [Development automation](#development-automation)
 - [Feedback and Contributions](#feedback-and-contributions)

 <!-- TOC end -->
```
@@ -557,6 +558,43 @@ To find any updates to the Helm charts that are listed in the repository, run
````diff
 bin/chart-updates
 ```

+## Development automation
+
+This repository can be used for development automation, for instance on a k3s or k3d (dockerized k3s) cluster. The
+example below shows how to deploy on a k3d cluster.
+
+1. Install k3d (see [here](https://github.com/k3d-io/k3d#get)).
+2. Create a k3d cluster that is configured to run RADAR-base:
+
+   ```shell
+   k3d cluster create my-test-cluster --port '80:80@loadbalancer' --config=.github/ci_config/k3d-config.yaml
+   ```
+
+   This example creates a cluster named `my-test-cluster` with a load balancer that forwards local port 80 to the
+   cluster. The configuration file `.github/ci_config/k3d-config.yaml` is used to configure the cluster. This cluster
+   will be accessible in _kubectl_ with context name _k3d-my-test-cluster_.
+
+3. Initialize the RADAR-Kubernetes deployment:
+
+   ```shell
+   ./bin/init
+   ```
+
+4. In file _etc/production.yaml_ (a sketch of the resulting settings follows this diff):
+
+   - set _kubeContext_ to _k3d-my-test-cluster_
+   - set _dev_deployment_ to _true_
+   - (optional) enable/disable components as needed with the __install_ fields
+
+5. Install RADAR-Kubernetes on the k3d cluster:
+
+   ```shell
+   helmfile sync
+   ```
+
+When installation is complete, you can access the applications at `http://localhost`.
+
 ## Feedback and Contributions

 Enabling the RADAR-base community to use RADAR-Kubernetes is important to us. If you have trouble setting up the platform using the provided instructions, you can create a discussion with exact details to reproduce the issue and the expected behavior.
````
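For reference, the edits in step 4 would leave _etc/production.yaml_ looking roughly like this (a sketch; only the touched fields are shown, and `radar_appserver` is used purely as an illustration of an `_install` toggle):

```yaml
kubeContext: k3d-my-test-cluster
dev_deployment: true

# Optional: enable/disable individual components via their _install fields,
# e.g. skip the appserver in a minimal dev setup (illustrative choice).
radar_appserver:
  _install: false
```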

environments.yaml.tmpl

Lines changed: 11 additions & 0 deletions
@@ -8,6 +8,17 @@ environments:

```diff
 {{ if not .Values.enable_tls }}
 - ../mods/disable_tls.yaml
 {{ end }}
+{{ if not .Values.enable_logging_monitoring }}
+- ../mods/disable_monitoring_logging.yaml
+{{ end }}
+{{ if .Values.dev_deployment }}
+- ../mods/disable_tls.yaml
+- ../mods/disable_monitoring_logging.yaml
+- ../mods/localdev.yaml
+- ../mods/minimal.yaml
+- ../mods/minimal_kafka.yaml.gotmpl
+- ../mods/fast_deploy.yaml
+{{ end }}

 ---
```
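Which overlay files a given set of values pulls in can be verified by rendering the merged helmfile state (a sketch; `helmfile build` prints the composed configuration without deploying anything):

```shell
# Prints the composed helmfile, showing which ../mods/*.yaml overlays
# were selected by enable_tls, enable_logging_monitoring and dev_deployment.
helmfile build
```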

etc/base.yaml

Lines changed: 6 additions & 1 deletion
@@ -10,10 +10,15 @@ server_name: example.com

```diff
 maintainer_email: MAINTAINER_EMAIL@example.com
 # Number of Kafka pods that will be installed
 kafka_num_brokers: 3
+# Enable logging and monitoring
+enable_logging_monitoring: false
 # Enable TLS redirection and retrieval of Let's Encrypt certificates.
 # Can be disabled when TLS termination is handled upstream of the on-cluster Nginx reverse proxy.
 enable_tls: true

+# Minimal deployment for development (disables TLS, monitoring and logging, sets kafka and minio brokers to 1).
+dev_deployment: false
+

 # --------------------------------------------------------- 00-init.yaml ---------------------------------------------------------
```

@@ -460,7 +465,7 @@ minio:

```diff
 radar_s3_connector:
   # set to true if radar-s3-connector should be installed
   _install: true
-  _chart_version: 0.3.4
+  _chart_version: 0.4.0
   _extra_timeout: 90
   replicaCount: 1
   # The bucket name where intermediate data for cold storage should be written to.
```

helmfile.d/10-services.yaml

Lines changed: 1 addition & 1 deletion
@@ -677,7 +677,7 @@ releases:

```diff
         value: {{ .Values.confluent_cloud.enabled }}
       - name: serviceMonitor.enabled
         value: {{ .Values.kube_prometheus_stack._install }}
-      {{- if .Values.confluent_cloud.enabled }}
+      {{ if .Values.confluent_cloud.enabled }}
       - name: schemaRegistry
         value: {{ .Values.confluent_cloud.cc.schemaRegistryUrl }}
       - name: bootstrapServers
```

mods/disable_monitoring_logging.yaml

Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@

```yaml
# disables logging (in graylog) and monitoring (with prometheus)

kube_prometheus_stack:
  _install: false
mongodb:
  _install: false
elasticsearch:
  _install: false
graylog:
  _install: false
fluent_bit:
  _install: false

cert_manager:
  prometheus:
    servicemonitor:
      enabled: false
postgresql:
  metrics:
    enabled: false
  primary:
    sidecars: []
timescaledb:
  metrics:
    enabled: false
  primary:
    sidecars: []
nginx_ingress:
  controller:
    metrics:
      enabled: false
      serviceMonitor:
        enabled: false
catalog_server:
  prometheus:
    jmx:
      enabled: false
cp_kafka:
  prometheus:
    jmx:
      enabled: false
cp_zookeeper:
  prometheus:
    jmx:
      enabled: false
cp_schema_registry:
  prometheus:
    jmx:
      enabled: false
redis:
  metrics:
    enabled: false
minio:
  metrics:
    serviceMonitor:
      enabled: false
    prometheusRule:
      enabled: false
radar_upload_postgresql:
  metrics:
    enabled: false
  primary:
    sidecars: []
```
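After a `helmfile sync` with this overlay active, none of the monitoring or logging releases should be installed, which can be spot-checked from the cluster (a sketch; the grep pattern only approximates the release names):

```shell
# List all Helm releases and look for the disabled monitoring/logging components.
helm list -A | grep -Ei 'prometheus|graylog|elasticsearch|mongodb|fluent' \
  || echo "monitoring/logging stack not installed"
```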

mods/fast_deploy.yaml

Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@

```yaml
radar_home:
  readinessProbe:
    periodSeconds: 5
app_config:
  readinessProbe:
    periodSeconds: 5
app_config_frontend:
  readinessProbe:
    periodSeconds: 5
management_portal:
  readinessProbe:
    periodSeconds: 5
radar_appserver:
  readinessProbe:
    periodSeconds: 5
data_dashboard_backend:
  readinessProbe:
    periodSeconds: 5
radar_rest_sources_authorizer:
  readinessProbe:
    periodSeconds: 5
radar_rest_sources_backend:
  readinessProbe:
    periodSeconds: 5
radar_gateway:
  readinessProbe:
    periodSeconds: 5
radar_integration:
  readinessProbe:
    periodSeconds: 5
radar_upload_connect_frontend:
  readinessProbe:
    periodSeconds: 5
radar_upload_connect_backend:
  readinessProbe:
    periodSeconds: 5
radar_push_endpoint:
  readinessProbe:
    periodSeconds: 5
kafka_manager:
  readinessProbe:
    periodSeconds: 5
ccSchemaRegistryProxy:
  readinessProbe:
    periodSeconds: 5
cert_manager:
  webhook:
    readinessProbe:
      periodSeconds: 5
minio:
  readinessProbe:
    periodSeconds: 5
```
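These overrides feed each chart's readiness probe so that dev deployments report Ready sooner than the chart defaults. In a rendered pod spec, the value lands in the standard Kubernetes probe block; a generic sketch (the handler shown is illustrative, not any specific chart's manifest):

```yaml
readinessProbe:
  httpGet:            # handler varies per chart; shown only for illustration
    path: /health
    port: 8080
  periodSeconds: 5    # probe every 5 seconds instead of the chart default
```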
Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@

```sql
SET 'auto.offset.reset' = 'earliest';

-- Register the 'ksql_observations' topic (created if it does not exist).
CREATE STREAM observations (
    PROJECT VARCHAR KEY, -- 'KEY' means that this field is part of the kafka message key
    SUBJECT VARCHAR KEY,
    SOURCE VARCHAR KEY,
    TOPIC_NAME VARCHAR,
    CATEGORY VARCHAR,
    VARIABLE VARCHAR,
    OBSERVATION_TIME TIMESTAMP,
    OBSERVATION_TIME_END TIMESTAMP,
    TYPE VARCHAR,
    VALUE_NUMERIC DOUBLE,
    VALUE_TEXTUAL VARCHAR
) WITH (
    kafka_topic = 'ksql_observations',
    partitions = 1,
    format = 'avro'
);
```
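Once this stream exists, incoming records can be spot-checked from the ksql CLI with a push query (a sketch; the `LIMIT` stops the otherwise endless query):

```sql
-- Peek at the first few observations as they arrive.
SELECT PROJECT, SUBJECT, CATEGORY, VARIABLE, VALUE_NUMERIC, VALUE_TEXTUAL
FROM observations
EMIT CHANGES
LIMIT 5;
```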
Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@

```sql
CREATE STREAM questionnaire_app_event (
    projectId VARCHAR KEY, -- 'KEY' means that this field is part of the kafka message key
    userId VARCHAR KEY,
    sourceId VARCHAR KEY,
    questionnaireName VARCHAR,
    eventType VARCHAR,
    time DOUBLE,
    metadata MAP<VARCHAR, VARCHAR>
) WITH (
    kafka_topic = 'questionnaire_app_event',
    partitions = 1,
    format = 'avro'
);

INSERT INTO observations
WITH (QUERY_ID='questionnaire_app_event_observations')
SELECT
    q.projectId AS PROJECT,
    q.userId AS SUBJECT,
    q.sourceId AS SOURCE,
    'questionnaire_app_event' as TOPIC_NAME,
    q.questionnaireName as CATEGORY,
    q.eventType as VARIABLE,
    FROM_UNIXTIME(CAST(q.time * 1000 AS BIGINT)) as OBSERVATION_TIME,
    CAST(NULL as TIMESTAMP) as OBSERVATION_TIME_END,
    'STRING_JSON' as TYPE,
    CAST(NULL as DOUBLE) as VALUE_NUMERIC,
    TO_JSON_STRING(q.metadata) as VALUE_TEXTUAL
FROM questionnaire_app_event q
PARTITION BY q.projectId, q.userId, q.sourceId -- this sets the fields in the kafka message key
EMIT CHANGES;
```
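To exercise this pipeline without a running app, a test event can be inserted by hand from the ksql CLI (a sketch; all identifiers and the questionnaire name are made up):

```sql
INSERT INTO questionnaire_app_event (
    projectId, userId, sourceId, questionnaireName, eventType, time, metadata
) VALUES (
    'test-project', 'test-user', 'test-source',  -- hypothetical key fields
    'demo-questionnaire', 'COMPLETED',           -- hypothetical payload
    1700000000.0,                                -- unix time in seconds, as DOUBLE
    MAP('app_version' := '1.0.0')
);
```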
Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@

```sql
CREATE STREAM questionnaire_response (
    projectId VARCHAR KEY, -- 'KEY' means that this field is part of the kafka message key
    userId VARCHAR KEY,
    sourceId VARCHAR KEY,
    time DOUBLE,
    timeCompleted DOUBLE,
    timeNotification DOUBLE,
    name VARCHAR,
    version VARCHAR,
    answers ARRAY<STRUCT<questionId VARCHAR, value STRUCT<int INT, string VARCHAR, double DOUBLE>, startTime DOUBLE, endTime DOUBLE>>
) WITH (
    kafka_topic = 'questionnaire_response',
    partitions = 1,
    format = 'avro'
);

CREATE STREAM questionnaire_response_exploded
AS SELECT
    EXPLODE(TRANSFORM(q.answers, a => a->questionId)) as VARIABLE,
    FROM_UNIXTIME(CAST(q.time * 1000 AS BIGINT)) as OBSERVATION_TIME,
    q.projectId,
    q.userId,
    q.sourceId,
    'questionnaire_response' as TOPIC_NAME,
    q.name as CATEGORY,
    CAST(NULL as TIMESTAMP) as OBSERVATION_TIME_END,
    -- WARNING!!! The cast from VARCHAR (string) to DOUBLE will throw a Java exception if the string is not a number.
    -- This does not mean that the message will be lost. The value will be present in the VALUE_TEXTUAL_OPTIONAL field.
    EXPLODE(TRANSFORM(q.answers, a => COALESCE(a->value->double, CAST(a->value->int as DOUBLE), CAST(a->value->string as DOUBLE)))) as VALUE_NUMERIC,
    EXPLODE(TRANSFORM(q.answers, a => CASE
        WHEN a->value->int IS NOT NULL THEN 'INTEGER'
        WHEN a->value->double IS NOT NULL THEN 'DOUBLE'
        ELSE NULL
    END)) as TYPE,
    -- Note: when the cast to double works for the string value, VALUE_TEXTUAL_OPTIONAL will also be set.
    EXPLODE(TRANSFORM(q.answers, a => a->value->string)) as VALUE_TEXTUAL_OPTIONAL
FROM questionnaire_response q
EMIT CHANGES;

INSERT INTO observations
WITH (QUERY_ID='questionnaire_response_observations')
SELECT
    q.projectId as PROJECT,
    q.sourceId as SOURCE,
    q.userId as SUBJECT,
    TOPIC_NAME, CATEGORY, VARIABLE, OBSERVATION_TIME, OBSERVATION_TIME_END,
    CASE
        WHEN TYPE IS NULL AND VALUE_NUMERIC IS NOT NULL THEN 'DOUBLE' -- must have been derived from a string cast
        WHEN TYPE IS NULL AND VALUE_NUMERIC IS NULL THEN 'STRING'
        ELSE TYPE -- keep the original type when TYPE is not NULL
    END as TYPE,
    VALUE_NUMERIC,
    CASE
        WHEN VALUE_NUMERIC IS NOT NULL THEN NULL -- when the cast to double worked for the string value, set VALUE_TEXTUAL to NULL
        ELSE VALUE_TEXTUAL_OPTIONAL
    END as VALUE_TEXTUAL
FROM questionnaire_response_exploded q
PARTITION BY q.projectId, q.userId, q.sourceId -- this sets the fields in the kafka message key
EMIT CHANGES;

-- TODO: exploding the 'select:' questions is not yet fully designed.
-- The code is kept here for future reference.
-- Multi-select questionnaire questions are stored as a single 'value' string with the
-- names of the selected options separated by commas. Multi-select questions are prefixed
-- by 'select:' in the questionId.
-- When 'questionId' is like 'select:%', create a new stream with the select options.
-- The options in the value field are split on commas and added as separate VARIABLE records.
-- The VALUE_NUMERIC is set to 1 and VALUE_TEXTUAL is set to NULL.
-- INSERT INTO observations
--     SELECT
--         EXPLODE(SPLIT(VALUE_TEXTUAL, ',')) as VARIABLE,
--         PROJECT, SOURCE, SUBJECT, TOPIC_NAME, CATEGORY, OBSERVATION_TIME, OBSERVATION_TIME_END,
--         'INTEGER' as TYPE,
--         CAST(1 as DOUBLE) VALUE_NUMERIC,
--         CAST(NULL as VARCHAR) as VALUE_TEXTUAL
--     FROM questionnaire_response_observations
--     WHERE
--         VARIABLE IS NOT NULL
--         AND VARIABLE LIKE 'select:%'
--         AND VALUE_TEXTUAL IS NOT NULL
--         AND VALUE_TEXTUAL != ''
--     PARTITION BY SUBJECT, PROJECT, SOURCE
--     EMIT CHANGES;
```
