From c8fec7c71d11e684eafaa16c5e4a702611789d8c Mon Sep 17 00:00:00 2001 From: Antoine Brun Date: Tue, 30 Sep 2025 15:05:18 +0200 Subject: [PATCH 1/5] update to 3.2.2b658 --- docker-compose.ccla-pack.yml | 18 +- docker-compose.ccla.yml | 10 +- docker-compose.ha.yml | 615 ------------------- docker-compose.yml | 71 ++- helm/Chart.yaml | 4 +- helm/README.md | 19 + helm/images.application.txt | 56 +- helm/templates/02-persistentvolumes.yaml | 40 -- helm/templates/_helpers.tpl | 1 + helm/templates/ccla/ccla-scan-env.yaml | 2 +- helm/templates/ccla/ccla.yaml | 40 +- helm/templates/ccla/key-vault.yaml | 2 +- helm/templates/msa-api.yaml | 65 +- helm/templates/msa-auth.yaml | 47 +- helm/templates/msa-broker.yaml | 43 +- helm/templates/msa-camunda.yaml | 24 +- helm/templates/msa-cerebro.yaml | 21 + helm/templates/msa-db-updater.yaml | 2 +- helm/templates/msa-db.yaml | 54 ++ helm/templates/msa-dev.yaml | 3 + helm/templates/msa-es.yaml | 8 + helm/templates/msa-front.yaml | 18 +- helm/templates/msa-kibana.yaml | 7 + helm/templates/msa-monitor-writer.yaml | 52 +- helm/templates/msa-monitoring.yaml | 8 +- helm/templates/msa-opensearch-dashboard.yaml | 34 +- helm/templates/msa-opensearch.yaml | 15 +- helm/templates/msa-parse.yaml | 3 + helm/templates/msa-rsyslog.yaml | 14 +- helm/templates/msa-secrets.yaml | 2 +- helm/templates/msa-smtp.yaml | 24 +- helm/templates/msa-ui.yaml | 14 +- helm/templates/msa-zipkin.yaml | 22 +- helm/templates/version-configmap.yaml | 2 +- helm/values.public.yaml | 2 +- helm/values.yaml | 139 +++-- 36 files changed, 662 insertions(+), 839 deletions(-) delete mode 100644 docker-compose.ha.yml diff --git a/docker-compose.ccla-pack.yml b/docker-compose.ccla-pack.yml index 550f1d01..f0407d9f 100644 --- a/docker-compose.ccla-pack.yml +++ b/docker-compose.ccla-pack.yml @@ -9,7 +9,7 @@ services: cld-automation-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-automation-installer-3.2.2-sha-6ab68a25a4642ef48314867fbb3add87986812d6 + image: ubiqube/cld-automation-installer:sha-23bcb2379abdb07ea85c0923788dcfcd8f4bae32 logging: <<: *logging volumes: @@ -18,7 +18,7 @@ services: cld-blueprint-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-blueprint-installer-3.2.2-sha-1f4dac22a1467dab2daf4c10acdefb820bb315cc + image: ubiqube/cld-blueprint-installer:sha-1f4dac22a1467dab2daf4c10acdefb820bb315cc logging: <<: *logging volumes: @@ -27,7 +27,7 @@ services: cld-cisco-ms-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-cisco-ms-installer-3.2.2-sha-4251bc22e2900ef0606f9beb458a073c0b903857 + image: ubiqube/cld-cisco-ms-installer:sha-4251bc22e2900ef0606f9beb458a073c0b903857 logging: <<: *logging volumes: @@ -36,7 +36,7 @@ services: cld-fortinet-ms-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-fortinet-ms-installer-3.2.2-sha-d62f1dc7524baebee1acc5123e73084d2e382a26 + image: ubiqube/cld-fortinet-ms-installer:sha-d62f1dc7524baebee1acc5123e73084d2e382a26 logging: <<: *logging volumes: @@ -45,7 +45,7 @@ services: cld-linux-ms-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-linux-ms-installer-3.2.2-sha-ce1222345c5ff881ed7c3c060185e674d2594df0 + image: ubiqube/cld-linux-ms-installer:sha-ce1222345c5ff881ed7c3c060185e674d2594df0 logging: <<: *logging volumes: @@ -54,7 +54,7 @@ services: cld-netskope-ms-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-netskope-ms-installer-3.2.2-sha-1f77ef4f95d0d6374729f4a6cf4a8daa024668a7 + image: ubiqube/cld-netskope-ms-installer:sha-1f77ef4f95d0d6374729f4a6cf4a8daa024668a7 logging: <<: *logging 
volumes: @@ -63,7 +63,7 @@ services: cld-paloalto-ngfw-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-paloalto-ngfw-installer-3.2.2-sha-35368719b7558753f37ea5df16595cdc3e29acc8 + image: ubiqube/cld-paloalto-ngfw-installer:sha-35368719b7558753f37ea5df16595cdc3e29acc8 logging: <<: *logging volumes: @@ -72,7 +72,7 @@ services: cld-paloalto-prisma-ms-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-paloalto-prisma-ms-installer-3.2.2-sha-29e9ec7352d027301226eb137c2179dea60548a8 + image: ubiqube/cld-paloalto-prisma-ms-installer:sha-29e9ec7352d027301226eb137c2179dea60548a8 logging: <<: *logging volumes: @@ -81,7 +81,7 @@ services: cld-zscaler-ms-installer: depends_on: - msa-dev - image: openmsa/openmsa:cld-zscaler-ms-installer-3.2.2-sha-5e47be5ed07a9e1c810d405bfe2cf36025bfe172 + image: ubiqube/cld-zscaler-ms-installer:sha-93f728c321ed79d10be24fa00ce8bf816d186a45 logging: <<: *logging volumes: diff --git a/docker-compose.ccla.yml b/docker-compose.ccla.yml index 57c0495d..5a2aa9ad 100644 --- a/docker-compose.ccla.yml +++ b/docker-compose.ccla.yml @@ -9,7 +9,7 @@ services: ccla-scan-app: environment: - UBIQUBE_ZAP_TOKEN=7da091fe-63a4-48c0-9bfa-7614c49feb7c - image: openmsa/openmsa:cloudclapp-scan-3.2.2-sha-d1303b2b66dfdafd870b501cdb91b9c5a1dc6465 + image: ubiqube/cloudclapp-scan:sha-d1303b2b66dfdafd870b501cdb91b9c5a1dc6465 ccla-scan-env: entrypoint: - zap.sh @@ -38,11 +38,11 @@ services: test: - CMD-SHELL - curl --fail http://localhost:8080 - image: openmsa/openmsa:cloudclapp-3.2.2-sha-c6e95880c59b6dd9b72468f8f6ee03f1d39147af + image: ubiqube/cloudclapp:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 cloudclapp-bin: depends_on: - msa-dev - image: openmsa/openmsa:msa2-ccla-bin-installer-3.2.2-sha-823a5abecf8c97a44e40314a632b570f5a5eb2a7 + image: ubiqube/msa2-ccla-bin-installer:sha-926a02fb507f520be7670d51eb6036093afbb6bf logging: <<: *logging volumes: @@ -51,7 +51,7 @@ services: cloudclapp-wf: depends_on: - msa-dev - image: openmsa/openmsa:msa2-ccla-wf-installer-3.2.2-sha-cc7bc20fafc364ef4a460f031ad081c24c4276f8 + image: ubiqube/msa2-ccla-wf-installer:sha-e06e1cb61a4e53930f602e9af9e4a3131ccec01b logging: <<: *logging volumes: @@ -103,4 +103,4 @@ services: test: - CMD-SHELL - curl --fail http://localhost:8080 - image: openmsa/openmsa:sase-opslab-3.2.2-sha-c6e95880c59b6dd9b72468f8f6ee03f1d39147af + image: ubiqube/sase-opslab:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 diff --git a/docker-compose.ha.yml b/docker-compose.ha.yml deleted file mode 100644 index a9753ef0..00000000 --- a/docker-compose.ha.yml +++ /dev/null @@ -1,615 +0,0 @@ -x-amqp-monitoring: &amqp-monitoring - AMQP_ADDRESS: core-engine.monitoring - AMQP_PASSWORD: simetraehcapa - AMQP_PORT: "5672" - AMQP_SERVER: msa-broker - AMQP_USER: artemis -x-amqp-syslog: &amqp-syslog - AMQP_ADDRESS: core-engine.syslog - AMQP_PASSWORD: simetraehcapa - AMQP_PORT: "5672" - AMQP_SERVER: msa-broker - AMQP_USER: artemis -x-db-configuration: &db-configuration - UBI_SMS_DB_PASSWORD: Sec_52jlkhin_b4hdh - UBI_SMS_DB_USER: ncgest -x-es-configuration: &es-configuration - ES_CREDENTIALS: c3VwZXJ1c2VyOnheWnl1R002fnU9K2ZZMkc= - ES_SERVERS: msa-es -x-healthcheck: &healthcheck - interval: 30s - retries: 10 - start_period: 120s - timeout: 10s -x-logging: &logging - driver: json-file - options: - max-buffer-size: 4m - max-file: "5" - max-size: 10m - mode: non-blocking -x-placement_app: &placement_app - placement: - constraints: - - node.labels.worker==app - max_replicas_per_node: 1 - replicas: 1 -# for containers that have to run with only one 
replica -x-placement_app_one_replica: &placement_app_one_replica - placement: - constraints: - - node.labels.worker==app - max_replicas_per_node: 1 - replicas: 1 -x-placement_db: &placement_db - placement: - constraints: - - node.labels.worker==db - max_replicas_per_node: 1 - replicas: 1 -# snmptrap and rsyslog ports are in mode host so we have to configure one replica per worker -x-placement_rsyslog: &placement_rsyslog - placement: - constraints: - - node.labels.worker==app - max_replicas_per_node: 1 - replicas: 1 -x-svn-configuration: &svn-configuration - SVN_PASSWORD: UB1s5n - SVN_USERNAME: ubisvn -configs: - msa-version: - file: ./front/version.html -networks: - default: -# driver_opts: -# encrypted: "true" - -services: - msa-alarm: - depends_on: - - msa-db - - msa-es - - msa-api - deploy: - <<: *placement_app - environment: - <<: - - *db-configuration - - *es-configuration - CONTAINER_DOCKNAME: '{{.Task.Name}}.{{.Node.Hostname}}' - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-alarm-3.2.2-sha-948779a57daf0378d645693404181162a5a2fb6b - logging: - <<: *logging - volumes: - - msa_sms_logs:/opt/sms/logs - - msa_alarmbulkfiles:/opt/sms/spool/alarms - - msa_alarmbulkfiles_err:/opt/sms/spool/alarms-error - msa-api: - depends_on: - - msa-db - deploy: - <<: *placement_app - environment: - <<: *es-configuration - CONTAINER_DOCKNAME: '{{.Task.Name}}.{{.Node.Hostname}}' - HOST_HOSTNAME: '{{.Node.Hostname}}' - UBIQUBE_MONITORING_MODE: mongodb - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - curl -s --fail http://localhost:8480/actuator/health |jq -r '.status' |grep '^UP$$' - image: openmsa/openmsa:msa2-api-3.2.2-sha-48abccedfb8a879dba7c19cd15b834835abd6954 - logging: - <<: *logging - volumes: - - /mnt/NASVolume/msa_dev:/opt/devops/ - - /mnt/NASVolume/rrd_repository:/opt/rrd - - /mnt/NASVolume/msa_entities:/opt/fmc_entities - - /mnt/NASVolume/msa_repository:/opt/fmc_repository - - /mnt/NASVolume/msa_api_keystore:/etc/pki/jentreprise - - /mnt/NASVolume/msa_api_logs:/opt/wildfly/logs - msa-auth: - depends_on: - - msa-db - environment: - KEYCLOAK_ADMIN: admin - KEYCLOAK_ADMIN_PASSWORD: ubiqube - healthcheck: - interval: 30s - retries: 5 - start_period: 1m - test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' - timeout: 10s - image: openmsa/openmsa:msa2-auth-3.2.2-sha-918a03cda7549f6c17a6b456c9f69ce54b9b65c3 - msa-broker: - deploy: - <<: *placement_app - environment: - ARTEMIS_PASSWORD: simetraehcapa - ARTEMIS_USER: artemis - EXTRA_ARGS: --http-host 0.0.0.0 --relax-jolokia --clustered --addresses core-engine.syslog:anycast,core-engine.monitoring:anycast --queues core-engine.syslog:anycast,core-engine.monitoring:anycast - healthcheck: - <<: *healthcheck - test: ./bin/artemis check node --user=$${ARTEMIS_USER} --password=$${ARTEMIS_PASSWORD} --silent &>/dev/null - image: openmsa/openmsa:msa2-broker-3.2.2-sha-7090331f69a90a4d339072bd52b6b4b43744459c - logging: - <<: *logging - volumes: - - /mnt/NASVolume/mano_artemis:/var/lib/artemis-instance - msa-bud: - depends_on: - - msa-db - deploy: - <<: *placement_app - environment: - <<: *db-configuration - CONTAINER_DOCKNAME: '{{.Task.Name}}.{{.Node.Hostname}}' - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-bud-3.2.2-sha-3a6eed07bcea3a1a18d29ad14e7df5b422cbba91 - logging: - <<: *logging - volumes: - - msa_sms_logs:/opt/sms/logs - msa-camunda: - depends_on: 
- - msa-db - deploy: - <<: *placement_app - environment: - DB_DRIVER: org.postgresql.Driver - DB_PASSWORD: camunda - DB_URL: jdbc:postgresql://msa-db:5432/process-engine - DB_USERNAME: camunda - DB_VALIDATE_ON_BORROW: "true" - DB_VALIDATION_QUERY: SELECT 1 - WAIT_FOR: msa-db:5432 - WAIT_FOR_TIMEOUT: 60 - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - curl -s --fail http://localhost:8080/actuator/health |jq -r '.status' |grep '^UP$$' - image: openmsa/openmsa:msa2-camunda-3.2.2-sha-f9c9e1e4e3cbd3d9bb868048a65f45e5f4fc807c - logging: - <<: *logging - msa-cerebro: - deploy: - <<: *placement_app - entrypoint: - - /opt/cerebro/bin/cerebro - - -Dhosts.0.host=http://msa-es:9200 - environment: - AUTH_TYPE: basic - BASIC_AUTH_PWD: N@X{M4tfw'5%)+35 - BASIC_AUTH_USER: cerebro - healthcheck: - <<: *healthcheck - test: curl --fail http://localhost:9000/ - image: openmsa/openmsa:msa2-cerebro-3.2.2-sha-0142d25b3f4573e92162319f95142c141c302216 - logging: - <<: *logging - ports: - - 9000:9000 - msa-db: - depends_on: - msa-db-updater: - condition: service_completed_successfully - deploy: - <<: *placement_db - environment: - MAX_CONNECTIONS: 1600 - PG_USER: postgres - POSTGRES_PASSWORD: my_db_password - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - pg_isready -h localhost - timeout: 20s - image: openmsa/openmsa:msa2-db-3.2.2-sha-336ff86171aa49922743e35a25c2cb76f68c840b - logging: - <<: *logging - volumes: - - /mnt/NASVolume/msa_db:/var/lib/pgsql/17/data - - target: /dev/shm - tmpfs: - size: 2000000000 - type: tmpfs - msa-db-updater: - environment: - PGAUTO_ONESHOT: yes - POSTGRES_PASSWORD: my_db_password - image: openmsa/openmsa:dev-3.2.2-msa-pg-upgrader - volumes: - - /mnt/NASVolume/msa_db:/var/lib/postgresql/data - msa-dev: - deploy: - <<: *placement_app - healthcheck: - <<: *healthcheck - image: openmsa/openmsa:msa2-linuxdev-3.2.2-sha-3fe0e7b57252bcd7537a3dda8c69db5d324e965a - logging: - <<: *logging - volumes: - - /mnt/NASVolume/msa_entities:/opt/fmc_entities - - /mnt/NASVolume/msa_repository:/opt/fmc_repository - - /mnt/NASVolume/msa_dev:/opt/devops/ - - /mnt/NASVolume/msa_svn:/opt/svnroot - - /mnt/NASVolume/msa_api:/opt/ubi-jentreprise/generated/conf - - /mnt/NASVolume/msa_svn_ws:/opt/sms/spool/routerconfigs - msa-es: - deploy: - <<: *placement_app - environment: - <<: *es-configuration - ES_JAVA_OPTS: -Xms512m -Xmx1024m - bootstrap.memory_lock: "true" - discovery.type: single-node - script.painless.regex.enabled: "true" - xpack.security.enabled: "true" - healthcheck: - <<: *healthcheck - interval: 60s - start_period: 60s - test: | - curl -s -H "Authorization: Basic $${ES_CREDENTIALS}" 'http://localhost:9200/_cluster/health?pretty' |jq -e '.status == "green"' - timeout: 2s - image: openmsa/openmsa:msa2-es-3.2.2-sha-ed8a4af34257776376c4429e8a9a5d6d84fdcca2 - logging: - <<: *logging - ulimits: - memlock: - hard: -1 - soft: -1 - volumes: - - /mnt/NASVolume/msa_es:/usr/share/elasticsearch/data - msa-front: - configs: - - source: msa-version - target: /app/version/index.html - depends_on: - - msa-api - - msa-auth - - msa-cerebro - - msa-kibana - - msa-ui - deploy: - <<: *placement_app - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - curl -sk --fail https://localhost/version/ | grep -E '^\{"jira_version":"\d\.\d\.\d","msa_version":"\d\.\d\.\d","ccla_version":"\d\.\d\.\d","build":"\d+"\}$' - image: openmsa/openmsa:msa2-front-3.2.2-sha-8e68a1259eee188f514534d860cbd2332f53cb7b - logging: - <<: *logging - ports: - - mode: ingress - protocol: tcp - published: 80 - target: 80 - - 
mode: ingress - protocol: tcp - published: 443 - target: 443 - volumes: - - /mnt/NASVolume/msa_front_conf:/etc/nginx/custom_conf.d - # - # uncomment one of the 2 sections below when installing a custom certificate - # - Docker standard standalone installation - # volumes: - # - "msa_front:/etc/nginx/ssl" - # - Docker Swarm HA installation - # volumes: - # - "/mnt/NASVolume/msa_front:/etc/nginx/ssl" - msa-kibana: - deploy: - <<: *placement_app - environment: - <<: *es-configuration - ELASTICSEARCH_HOSTS: http://msa-es:9200 - ELASTICSEARCH_URL: http://msa-es:9200 - healthcheck: - <<: *healthcheck - test: curl -k --fail http://localhost:5601/kibana/ - image: openmsa/openmsa:msa2-kibana-3.2.2-sha-45e1e850a2ee9316dbfe48f2083e6604ee294df9 - logging: - <<: *logging - ports: - - 5601:5601 - msa-linux: - cap_add: - - sys_rawio - devices: - - /dev/mem:/dev/mem - healthcheck: - test: - - CMD-SHELL - - test -f /usr/bin/install_libraries.sh || echo false - image: openmsa/openmsa:msa2-linuxme-3.2.2-sha-3cc0ba072e49ec8dfc4003e203373158640d47b9 - msa-mongodb: - env_file: - - .env - # ccla must match user created in mongo-init.js - environment: - MONGO_INITDB_ROOT_PASSWORD: ${MONGO_INITDB_ROOT_PASSWORD:-my_db_password} - MONGO_INITDB_ROOT_USERNAME: ${MONGO_INITDB_ROOT_USERNAME:-admin} - healthcheck: - test: echo 'db.runCommand("ping").ok' | mongosh mongodb://${DB_USER:-msaUser}:${DB_PASSWORD:-ubiqube38}@localhost:${DB_PORT:-27017}/${DB_NAME:-msa} --quiet - image: openmsa/openmsa:msa2-mongodb-3.2.2-sha-1b40c09ed18524e2e0561e139537d73f635b69a3 - ports: - - 27017:27017 - restart: unless-stopped - volumes: - - /mnt/NASVolume/msa_mongodb_data:/data/db - - ./mongo/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro - msa-monitor-writer: - depends_on: - - msa-broker - - msa-mongodb - healthcheck: - test: - - CMD-SHELL - - curl -s --fail http://localhost:9099/mon-writer/actuator/health |jq -r '.status' |grep '^UP$$' - image: openmsa/openmsa:msa2-monitor-writer-3.2.2-sha-efa9743b295c4cdce519cfd45b76edeebde05f4f - msa-monitoring: - depends_on: - - msa-broker - - msa-db - - msa-es - - msa-dev - - msa-sms - deploy: - <<: *placement_app_one_replica - environment: - <<: - - *amqp-monitoring - - *db-configuration - - *es-configuration - CONTAINER_DOCKNAME: '{{.Task.Name}}.{{.Node.Hostname}}' - TARGET_MON: AMQP - # possible values RRD, ES, AMQP - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-monitoring-3.2.2-sha-3ed8905abee88ca8f9058f341913f704d2db12fc - logging: - <<: *logging - volumes: - - msa_sms_logs:/opt/sms/logs - - /mnt/NASVolume/msa_dev:/opt/devops/ - - /mnt/NASVolume/msa_entities:/opt/fmc_entities - - /mnt/NASVolume/msa_repository:/opt/fmc_repository - - /mnt/NASVolume/rrd_repository:/opt/rrd - - msa_monitbulkfiles:/opt/sms/spool/parser - - msa_monitbulkfiles_err:/opt/sms/spool/parser-error - msa-parse: - depends_on: - - msa-db - - msa-broker - - msa-es - - msa-dev - deploy: - <<: *placement_app - environment: - <<: - - *amqp-syslog - - *db-configuration - - *es-configuration - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-parse-3.2.2-sha-8fb673ecb2951281f18bddac2f3d8e1ffd02df93 - logging: - <<: *logging - volumes: - - msa_sms_logs:/opt/sms/logs - - /mnt/NASVolume/msa_dev:/opt/devops/ - - msa_parsebulkfiles:/opt/sms/spool/parser - - msa_parsebulkfiles_err:/opt/sms/spool/parser-error - msa-rsyslog: - depends_on: - 
- msa-broker - deploy: - <<: *placement_rsyslog - environment: - <<: *amqp-syslog - ACTIONTYPE: omamqp1 - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - ps -p 1 -h -o%cpu | awk '{if ($$1 > 99) exit 1; else exit 0;}' - image: openmsa/openmsa:msa2-rsyslog-3.2.2-sha-28f31999aaa5fe05728ea44890aabcc21072409c - logging: - <<: *logging - ports: - # on docker swarm rsyslog port can support only one protocol (TCP or UDP) per port and MUST be in host mode - - mode: host - protocol: udp - published: 514 - target: 514 - - mode: host - protocol: tcp - published: 6514 - target: 6514 - msa-sms: - depends_on: - - msa-db - - msa-dev - deploy: - <<: *placement_app - environment: - <<: - - *db-configuration - - *es-configuration - - *svn-configuration - CONTAINER_DOCKNAME: '{{.Task.Name}}.{{.Node.Hostname}}' - HOST_HOSTNAME: '{{.Node.Hostname}}' - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-sms-3.2.2-sha-6ce9834f41d43c4e53560d4fd9991fb142f77139 - logging: - <<: *logging - ports: - - mode: host - protocol: udp - published: 69 - target: 69 - - mode: host - protocol: udp - published: 5200 - target: 5200 - volumes: - - /mnt/NASVolume/msa_sms_logs:/opt/sms/logs - - /mnt/NASVolume/msa_dev:/opt/devops/ - - /mnt/NASVolume/msa_entities:/opt/fmc_entities - - /mnt/NASVolume/msa_repository:/opt/fmc_repository - - msa_bulkfiles:/opt/sms/spool/parser - - msa_bulkfiles_err:/opt/sms/spool/parser-error - - ./sms/etc/ssh/ssh_config.d/90-ubiqube.conf:/etc/ssh/ssh_config.d/90-ubiqube.conf - msa-smtp: - healthcheck: - interval: 30s - retries: 5 - start_period: 1m - test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/25' - timeout: 10s - image: openmsa/openmsa:msa2-smtp-3.2.2-sha-4b49c82383b22067be87ff969ae87ccf7a1f1f51 - msa-snmptrap: - depends_on: - - msa-db - - msa-es - - msa-dev - deploy: - <<: *placement_rsyslog - environment: - <<: - - *db-configuration - - *es-configuration - healthcheck: - <<: *healthcheck - test: - - CMD-SHELL - - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-snmptrap-3.2.2-sha-2b8dc239672da2e96165bd4e751b7c84330343ca - logging: - <<: *logging - ports: - - mode: host - protocol: udp - published: 162 - target: 162 - volumes: - - msa_sms_logs:/opt/sms/logs - - /mnt/NASVolume/msa_dev:/opt/devops/ - - msa_snmptrapbulkfiles:/opt/sms/spool/parser - - msa_snmptrapbulkfiles_err:/opt/sms/spool/parser-error - msa-svn: - environment: - <<: *svn-configuration - healthcheck: - interval: 30s - retries: 5 - start_period: 1m - test: timeout 10s bash -c 'pgrep svnserve' - timeout: 10s - image: openmsa/openmsa:msa2-svn-3.2.2-sha-2db53c87bde3bbf4f2fc43c1a84898f415fedd07 - volumes: - - msa_svn:/var/svn - msa-ui: - depends_on: - - msa-api - deploy: - <<: *placement_app - environment: - - FEATURE_ADMIN=true - - FEATURE_REPOSITORY=true - - FEATURE_CONNECTION_STATUS=true - - FEATURE_ALARMS=true - - FEATURE_LICENCE=true - - FEATURE_TOPOLOGY=true - - FEATURE_MONITORING_PROFILES=true - - FEATURE_PROFILE_AUDIT_LOGS=true - - FEATURE_PERMISSION_PROFILES=true - - FEATURE_AI_ML=false - - FEATURE_MICROSERVICE_BULK_OPERATION=false - - FEATURE_EDIT_VARIABLES_IN_MICROSERVICE_CONSOLE=true - - FEATURE_WORKFLOW_OWNER=false - - FEATURE_PERMISSION_PROFILE_LABELS=false - - FEATURE_BPM=true - - UBIQUBE_ES_SECURITY_DISABLED=false - - FEATURE_ALARMS_AUTO_CLEARANCE=false - - FEATURE_IMPORT_WITH_SAME_AND_UPPERRANK=true - - FEATURE_REPOFOLDERLIST=[\"Datafiles\", \"Shared\"] - - 
FEATURE_ALARM_TROUBLE_TICKET=false - healthcheck: - test: - - CMD-SHELL - - curl --fail http://localhost:8080 - image: openmsa/openmsa:msa2-ui-3.2.2-sha-c6e95880c59b6dd9b72468f8f6ee03f1d39147af - logging: - <<: *logging - msa2-es-ilm: - depends_on: - - msa-es - deploy: - placement: - max_replicas_per_node: 1 - replicas: 0 - environment: - <<: *es-configuration - ELASTICSEARCH_URL: msa_es:9200 - UBI_ES_ALARM_INDEX_MULTIPLE_TTL: '*|90d' - UBI_ES_AUDIT_INDEX_MULTIPLE_TTL: '*|90d' - UBI_ES_CACHE_INDEX_DEFAULT_TTL: 1w - UBI_ES_DELETE_SCROLL_SIZE: "4000" - UBI_ES_ILM_LOG_CRONTAB: '*/2 * * * * root php /opt/ubi-es-ilm/log_retention_management.php --verbose=3 > /proc/1/fd/1 2>&1' - # For elasticsearch scripts /opt/ubi-es-ilm/log_retention_management.php - # UBI_ES_INDEX_MULTIPLE_TTL: "type:traffic|7d,type:event|30d,*|90d" - UBI_ES_INDEX_MULTIPLE_TTL: '*|90d' - UBI_ES_LOG_DETENTION_DELETE: "true" - UBI_ES_LOG_SEARCH_INDEX_LIST: ubilogs - UBI_ES_MAX_DOCS: "" - UBI_ES_RETENTION_ALARM_INDEX_NAME: ubialarm* - UBI_ES_RETENTION_AUDIT_INDEX_NAME: ubiaudit* - UBI_ES_RETENTION_INDEX_NAME: ubilogs* - healthcheck: - test: - - CMD-SHELL - - find /opt/msa2-es-ilm/log/log_retention.log -type f -mmin -10 - image: openmsa/openmsa:msa2-es-ilm-3.2.2-sha-e1b9952e7b8a397a603d1159626ef9741c6f2941 - volumes: - - /mnt/NASVolume/msa2_es-ilm:/opt/msa2-es-ilm -volumes: - msa_alarmbulkfiles: - msa_alarmbulkfiles_err: - msa_api_logs: - msa_bulkfiles: - msa_bulkfiles_err: - msa_monitbulkfiles: - msa_monitbulkfiles_err: - msa_parsebulkfiles: - msa_parsebulkfiles_err: - msa_sms_logs: - msa_snmptrapbulkfiles: - msa_snmptrapbulkfiles_err: - msa_svn: diff --git a/docker-compose.yml b/docker-compose.yml index 840cca11..9ff7249a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -34,7 +34,7 @@ services: msa-adapters: depends_on: - msa-dev - image: openmsa/openmsa:msa2-adapters-3.2.2-sha-938854a737b9a7a6a0d2b2c9ef0bc3c7f35ff817 + image: ubiqube/msa2-adapters:sha-7272142683783e432f8834b8ab64a089abbe41a0 logging: <<: *logging volumes: @@ -60,7 +60,7 @@ services: - CMD-SHELL - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 timeout: 5s - image: openmsa/openmsa:msa2-alarm-3.2.2-sha-948779a57daf0378d645693404181162a5a2fb6b + image: ubiqube/msa2-alarm:sha-948779a57daf0378d645693404181162a5a2fb6b logging: <<: *logging volumes: @@ -80,6 +80,7 @@ services: MANAGEMENT_TRACING_ENABLED: true MANAGEMENT_ZIPKIN_TRACING_ENDPOINT: http://msa-zipkin:9411/api/v2/spans UBIQUBE_MONITORING_MODE: mongodb + _JAVA_OPTIONS: "-XX:MaxRAM=8g" healthcheck: interval: 10s retries: 3 @@ -88,7 +89,7 @@ services: - CMD-SHELL - curl -s --fail http://localhost:8480/actuator/health |jq -r '.status' |grep '^UP$$' timeout: 5s - image: openmsa/openmsa:msa2-api-3.2.2-sha-48abccedfb8a879dba7c19cd15b834835abd6954 + image: ubiqube/msa2-api:sha-932b5e70acb87524e4fa1670e75fed08b6ddbb59 logging: <<: *logging volumes: @@ -112,7 +113,7 @@ services: start_period: 1m test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' timeout: 10s - image: openmsa/openmsa:msa2-auth-3.2.2-sha-918a03cda7549f6c17a6b456c9f69ce54b9b65c3 + image: ubiqube/msa2-auth:sha-aa48205a5ece128029a1162f74640f23f70ad6d8 msa-broker: environment: ARTEMIS_PASSWORD: simetraehcapa @@ -120,7 +121,7 @@ services: EXTRA_ARGS: --http-host 0.0.0.0 --relax-jolokia --clustered --addresses core-engine.syslog:anycast,core-engine.monitoring:anycast --queues core-engine.syslog:anycast,core-engine.monitoring:anycast healthcheck: test: ./bin/artemis check node --user=$${ARTEMIS_USER} --password=$${ARTEMIS_PASSWORD} 
--silent &>/dev/null - image: openmsa/openmsa:msa2-broker-3.2.2-sha-7090331f69a90a4d339072bd52b6b4b43744459c + image: docker.io/ubiqube/msa2-broker:sha-7090331f69a90a4d339072bd52b6b4b43744459c logging: <<: *logging volumes: @@ -140,7 +141,7 @@ services: - CMD-SHELL - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 timeout: 5s - image: openmsa/openmsa:msa2-bud-3.2.2-sha-3a6eed07bcea3a1a18d29ad14e7df5b422cbba91 + image: ubiqube/msa2-bud:sha-3a6eed07bcea3a1a18d29ad14e7df5b422cbba91 logging: <<: *logging volumes: @@ -158,6 +159,7 @@ services: DB_VALIDATION_QUERY: SELECT 1 WAIT_FOR: msa-db:5432 WAIT_FOR_TIMEOUT: 60 + _JAVA_OPTIONS: "-XX:MaxRAM=2g" healthcheck: interval: 10s retries: 3 @@ -166,7 +168,7 @@ services: - CMD-SHELL - curl -s --fail http://localhost:8080/actuator/health |jq -r '.status' |grep '^UP$$' timeout: 5s - image: openmsa/openmsa:msa2-camunda-3.2.2-sha-f9c9e1e4e3cbd3d9bb868048a65f45e5f4fc807c + image: ubiqube/msa2-camunda:sha-f9c9e1e4e3cbd3d9bb868048a65f45e5f4fc807c logging: <<: *logging msa-cerebro: @@ -182,7 +184,7 @@ services: BASIC_AUTH_USER: cerebro healthcheck: test: curl --fail http://localhost:9000/ - image: openmsa/openmsa:msa2-cerebro-3.2.2-sha-0142d25b3f4573e92162319f95142c141c302216 + image: ubiqube/msa2-cerebro:sha-0142d25b3f4573e92162319f95142c141c302216 logging: <<: *logging ports: @@ -203,7 +205,7 @@ services: - CMD-SHELL - pg_isready -h localhost timeout: 20s - image: openmsa/openmsa:msa2-db-3.2.2-sha-336ff86171aa49922743e35a25c2cb76f68c840b + image: ubiqube/msa2-db:sha-336ff86171aa49922743e35a25c2cb76f68c840b logging: <<: *logging shm_size: 1g @@ -213,13 +215,13 @@ services: environment: PGAUTO_ONESHOT: yes POSTGRES_PASSWORD: my_db_password - image: openmsa/openmsa:dev-3.2.2-msa-pg-upgrader + image: ubiqube/dev:msa-pg-upgrader volumes: - msa_db:/var/lib/postgresql/data msa-dev: healthcheck: test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/22' - image: openmsa/openmsa:msa2-linuxdev-3.2.2-sha-3fe0e7b57252bcd7537a3dda8c69db5d324e965a + image: ubiqube/msa2-linuxdev:sha-55c8f5e0d0452ac7c45d4bf2e43bdfd1b62ff2bc logging: <<: *logging volumes: @@ -241,7 +243,7 @@ services: test: | curl -s -H "Authorization: Basic $${ES_CREDENTIALS}" 'http://localhost:9200/_cluster/health?pretty' |jq -e '.status == "green"' timeout: 2s - image: openmsa/openmsa:msa2-es-3.2.2-sha-ed8a4af34257776376c4429e8a9a5d6d84fdcca2 + image: ubiqube/msa2-es:sha-ed8a4af34257776376c4429e8a9a5d6d84fdcca2 logging: <<: *logging ulimits: @@ -267,7 +269,7 @@ services: test: - CMD-SHELL - curl -sk --fail https://localhost/version/ | grep -E '^\{"jira_version":"\d\.\d\.\d","msa_version":"\d\.\d\.\d","ccla_version":"\d\.\d\.\d","build":"\d+"\}$' - image: openmsa/openmsa:msa2-front-3.2.2-sha-8e68a1259eee188f514534d860cbd2332f53cb7b + image: ubiqube/msa2-front:sha-8e68a1259eee188f514534d860cbd2332f53cb7b logging: <<: *logging # uncomment one of the 2 sections below when installing a custom certificate @@ -301,7 +303,7 @@ services: ELASTICSEARCH_URL: http://msa-es:9200 healthcheck: test: curl -k --fail http://localhost:5601/kibana/ - image: openmsa/openmsa:msa2-kibana-3.2.2-sha-45e1e850a2ee9316dbfe48f2083e6604ee294df9 + image: ubiqube/msa2-kibana:sha-45e1e850a2ee9316dbfe48f2083e6604ee294df9 logging: <<: *logging ports: @@ -309,7 +311,7 @@ services: msa-kibana-wf: depends_on: - msa-dev - image: openmsa/openmsa:msa2-wf-kibana-3.2.2-sha-5b183956d2bf2d62423e7ef059e3af35639aad6a + image: ubiqube/msa2-wf-kibana:sha-5b183956d2bf2d62423e7ef059e3af35639aad6a logging: <<: *logging volumes: @@ -324,7 +326,7 @@ 
services: devices: # required for dmidecode used by polld/asset - /dev/mem:/dev/mem hostname: linux-me - image: openmsa/openmsa:msa2-linuxme-3.2.2-sha-3cc0ba072e49ec8dfc4003e203373158640d47b9 + image: ubiqube/msa2-linuxme:sha-3cc0ba072e49ec8dfc4003e203373158640d47b9 logging: <<: *logging ports: @@ -341,7 +343,7 @@ services: retries: 10 start_period: 5s test: echo 'db.runCommand("ping").ok' | mongosh mongodb://${MSA_MONGODB_USER:-msaUser}:${MSA_MONGODB_PASSWORD:-ubiqube38}@localhost:${MSA_MONGODB_PORT:-27017}/${MSA_MONGODB_NAME:-msa} --quiet - image: openmsa/openmsa:msa2-mongodb-3.2.2-sha-1b40c09ed18524e2e0561e139537d73f635b69a3 + image: ubiqube/msa2-mongodb:sha-1b40c09ed18524e2e0561e139537d73f635b69a3 ports: - 27017:27017 restart: unless-stopped @@ -357,6 +359,7 @@ services: environment: MANAGEMENT_TRACING_ENABLED: true MANAGEMENT_ZIPKIN_TRACING_ENDPOINT: http://msa-zipkin:9411/api/v2/spans + _JAVA_OPTIONS: "-XX:MaxRAM=1g" healthcheck: interval: 5s retries: 10 @@ -364,7 +367,7 @@ services: test: - CMD-SHELL - curl -s --fail http://localhost:9099/mon-writer/actuator/health |jq -r '.status' |grep '^UP$$' - image: openmsa/openmsa:msa2-monitor-writer-3.2.2-sha-efa9743b295c4cdce519cfd45b76edeebde05f4f + image: ubiqube/msa2-monitor-writer:sha-efa9743b295c4cdce519cfd45b76edeebde05f4f msa-monitoring: depends_on: msa-broker: @@ -388,7 +391,7 @@ services: test: - CMD-SHELL - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-monitoring-3.2.2-sha-3ed8905abee88ca8f9058f341913f704d2db12fc + image: ubiqube/msa2-monitoring:sha-3ed8905abee88ca8f9058f341913f704d2db12fc logging: <<: *logging volumes: @@ -402,7 +405,7 @@ services: msa-ms-inventory-management: depends_on: - msa-dev - image: openmsa/openmsa:msa2-ms-inventory-management-3.2.2-sha-b28ae96821300678158d3cc5ff2917be559e14de + image: ubiqube/msa2-ms-inventory-management:sha-b28ae96821300678158d3cc5ff2917be559e14de logging: <<: *logging volumes: @@ -432,7 +435,7 @@ services: test: - CMD-SHELL - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-parse-3.2.2-sha-8fb673ecb2951281f18bddac2f3d8e1ffd02df93 + image: ubiqube/msa2-parse:sha-8fb673ecb2951281f18bddac2f3d8e1ffd02df93 logging: <<: *logging volumes: @@ -442,7 +445,7 @@ services: msa-php-sdk: depends_on: - msa-dev - image: openmsa/openmsa:msa2-php-sdk-installer-3.2.2-sha-de1481e9ba8315b67e5d80e5cac886dca1454ea5 + image: ubiqube/msa2-php-sdk-installer:sha-de1481e9ba8315b67e5d80e5cac886dca1454ea5 logging: <<: *logging volumes: @@ -451,7 +454,7 @@ services: msa-python-sdk: depends_on: - msa-dev - image: openmsa/openmsa:msa2-python-sdk-installer-3.2.2-sha-cf1b245bbd79a7e0f990a772358eeba0d51bfdb1 + image: ubiqube/msa2-python-sdk-installer:sha-cf1b245bbd79a7e0f990a772358eeba0d51bfdb1 logging: <<: *logging volumes: @@ -471,7 +474,7 @@ services: test: - CMD-SHELL - ps -p 1 -h -o%cpu | awk '{if ($$1 > 99) exit 1; else exit 0;}' - image: openmsa/openmsa:msa2-rsyslog-3.2.2-sha-28f31999aaa5fe05728ea44890aabcc21072409c + image: ubiqube/msa2-rsyslog:sha-28f31999aaa5fe05728ea44890aabcc21072409c logging: <<: *logging ports: @@ -500,7 +503,7 @@ services: - *db-configuration - *es-configuration - *svn-configuration - ALLOW_PARALLEL_CALLCOMMANDS: true + ALLOW_PARALLEL_CALLCOMMANDS: false healthcheck: interval: 5s retries: 10 @@ -509,7 +512,7 @@ services: - CMD-SHELL - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 timeout: 5s - image: openmsa/openmsa:msa2-sms-3.2.2-sha-6ce9834f41d43c4e53560d4fd9991fb142f77139 + image: 
ubiqube/msa2-sms:sha-6ce9834f41d43c4e53560d4fd9991fb142f77139 logging: <<: *logging ports: @@ -537,7 +540,7 @@ services: start_period: 1m test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/25' timeout: 10s - image: openmsa/openmsa:msa2-smtp-3.2.2-sha-4b49c82383b22067be87ff969ae87ccf7a1f1f51 + image: ubiqube/msa2-smtp:sha-4b49c82383b22067be87ff969ae87ccf7a1f1f51 msa-snmptrap: depends_on: msa-db: @@ -557,7 +560,7 @@ services: test: - CMD-SHELL - /opt/sms/bin/sms -e ISALIVE -t 1 | grep -q OK || exit 1 - image: openmsa/openmsa:msa2-snmptrap-3.2.2-sha-2b8dc239672da2e96165bd4e751b7c84330343ca + image: ubiqube/msa2-snmptrap:sha-2b8dc239672da2e96165bd4e751b7c84330343ca logging: <<: *logging ports: @@ -579,13 +582,13 @@ services: start_period: 5s test: timeout 10s bash -c 'pgrep svnserve' timeout: 10s - image: openmsa/openmsa:msa2-svn-3.2.2-sha-2db53c87bde3bbf4f2fc43c1a84898f415fedd07 + image: ubiqube/msa2-svn:sha-2db53c87bde3bbf4f2fc43c1a84898f415fedd07 volumes: - msa_svn:/var/svn msa-topology-wf: depends_on: - msa-dev - image: openmsa/openmsa:msa2-wf-topology-3.2.2-sha-1c902b6691f7b8b2fea301ed90fcb591a9648751 + image: ubiqube/msa2-wf-topology:sha-123d7c227159dbf3550cd1e2a860728b29d91800 logging: <<: *logging volumes: @@ -594,7 +597,7 @@ services: msa-tutorial-wf: depends_on: - msa-dev - image: openmsa/openmsa:msa2-wf-tutorial-3.2.2-sha-e36f85d8936507a484c1adaf0d45764e9e411c9e + image: ubiqube/msa2-wf-tutorial:sha-117f4fafb4beb12dd5a2fc94bc1aa55dd21c6f38 logging: <<: *logging volumes: @@ -629,13 +632,13 @@ services: test: - CMD-SHELL - curl --fail http://localhost:8080 - image: openmsa/openmsa:msa2-ui-3.2.2-sha-c6e95880c59b6dd9b72468f8f6ee03f1d39147af + image: ubiqube/msa2-ui:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 logging: <<: *logging msa-workflow-inventory-management: depends_on: - msa-dev - image: openmsa/openmsa:msa2-workflow-inventory-management-3.2.2-sha-9ee147ef8864e85e4c31ba21a8fae9aaaae1ca3d + image: ubiqube/msa2-workflow-inventory-management:sha-9ee147ef8864e85e4c31ba21a8fae9aaaae1ca3d logging: <<: *logging volumes: @@ -669,7 +672,7 @@ services: UBI_ES_RETENTION_ALARM_INDEX_NAME: ubialarm* UBI_ES_RETENTION_AUDIT_INDEX_NAME: ubiaudit* UBI_ES_RETENTION_INDEX_NAME: ubilogs* - image: openmsa/openmsa:msa2-es-ilm-3.2.2-sha-e1b9952e7b8a397a603d1159626ef9741c6f2941 + image: ubiqube/msa2-es-ilm:sha-e1b9952e7b8a397a603d1159626ef9741c6f2941 restart: unless-stopped volumes: - msa2_es-ilm:/opt/msa2-es-ilm diff --git a/helm/Chart.yaml b/helm/Chart.yaml index 6ed1950a..d2e9cefc 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -appVersion: 3.2.1.392 +appVersion: 3.2.2.658 description: Helm chart for msa and cloudclapp name: msa type: application -version: 3.2.1 +version: 3.2.2 diff --git a/helm/README.md b/helm/README.md index 5a4b03a3..f0991a2e 100644 --- a/helm/README.md +++ b/helm/README.md @@ -85,3 +85,22 @@ yq eval '.. | select(has("image")) | .image' helm/values.yaml ## msa-db-updater `msa-db-updater` is used for updating the SQL schema and content. This container is launched only when an operator/external database is in use. + +## Using official TLS certificates +Split your certificate into two files: `tls.crt` with the public certificate chain, and `tls.key` with the private key.
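+
+If your certificate arrived as a single combined PEM bundle, one way to produce the two files is with openssl. This is a sketch: `bundle.pem` is a hypothetical file name, assumed to contain the certificate chain followed by the private key.
+
+```bash
+# Extract the certificate chain (public part) into tls.crt
+openssl crl2pkcs7 -nocrl -certfile bundle.pem | openssl pkcs7 -print_certs > tls.crt
+# Extract the private key into tls.key
+openssl pkey -in bundle.pem -out tls.key
+```
+
+Then load the two files into a TLS secret in the chart's namespace: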
+ +```bash +kubectl create secret tls msa-tls \ + --cert=tls.crt \ + --key=tls.key \ + -n msa +``` + +Add the following YAML snippet on helm `upgrade`/`install`: +```yaml +global: + tls: + - hosts: + - "msa.ubiqube.com" + secretName: msa-tls +``` diff --git a/helm/images.application.txt b/helm/images.application.txt index 841494f1..2651ad34 100644 --- a/helm/images.application.txt +++ b/helm/images.application.txt @@ -2,15 +2,15 @@ # /!\ To avoid merge conflicts, please skip a line between images. # -boky/postfix:4.2.1-alpine +ubiqube/msa2-smtp:sha-4b49c82383b22067be87ff969ae87ccf7a1f1f51 busybox:1.37 docker.io/openzipkin/zipkin -docker.io/ubiqube/msa2-ccla-wf-installer:sha-a496e2bd5cd9121f99d5050e99ec2591c58dcfd5 +docker.io/ubiqube/msa2-ccla-wf-installer:sha-e06e1cb61a4e53930f602e9af9e4a3131ccec01b -docker.io/ubiqube/msa2-mariadb:sha-bf833c7d14957bf03013f1798dc4919cd84ba1b9 +docker.io/ubiqube/msa2-mariadb:sha-f2fb3a804060df12cbece26b2485b1f7934c63c7 hashicorp/vault:latest @@ -18,57 +18,57 @@ mongo:7.0 opensearchproject/opensearch-dashboards:1.3.20 -ubiqube/cloudclapp-scan:sha-0c453d7dc6b1fe51db9f444439b8a4271bfff5f4 +ubiqube/cloudclapp-scan:sha-d1303b2b66dfdafd870b501cdb91b9c5a1dc6465 -ubiqube/cloudclapp:sha-2dd82e24b1e83cb64877f5e3f0a15b8844457c5c +ubiqube/cloudclapp:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 -ubiqube/sase-opslab:sha-2dd82e24b1e83cb64877f5e3f0a15b8844457c5c +ubiqube/sase-opslab:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 -ubiqube/msa2-alarm:sha-9b99c5171107203ae28c5f9f501371c46d244c2b +ubiqube/msa2-alarm:sha-948779a57daf0378d645693404181162a5a2fb6b -ubiqube/msa2-api:sha-520736a182a77911c62dd5785decad9235003143 +ubiqube/msa2-api:sha-932b5e70acb87524e4fa1670e75fed08b6ddbb59 -ubiqube/msa2-auth:sha-583e0c3fca70d231b1db52e07c7d70d5f1b7dd29 +ubiqube/msa2-auth:sha-aa48205a5ece128029a1162f74640f23f70ad6d8 -ubiqube/msa2-broker:sha-f610df08d8c9df52148ed6c85a82c5db1d16cc1d +ubiqube/msa2-broker:sha-7090331f69a90a4d339072bd52b6b4b43744459c -ubiqube/msa2-bud:sha-6770e77371f2710851600a411f455561a14385e7 +ubiqube/msa2-bud:sha-3a6eed07bcea3a1a18d29ad14e7df5b422cbba91 -ubiqube/msa2-camunda:sha-0521c708e11ff65f3912e92d5008af81d5661a52 +ubiqube/msa2-camunda:sha-f9c9e1e4e3cbd3d9bb868048a65f45e5f4fc807c -ubiqube/msa2-cerebro:sha-c1f46f535cf853d40ee4e11f536740ac7332a0d7 +ubiqube/msa2-cerebro:sha-0142d25b3f4573e92162319f95142c141c302216 -ubiqube/msa2-db:sha-0eaaedf0ece58ecb0eb03a456b1de7d970e7f0e3 +ubiqube/msa2-db:sha-336ff86171aa49922743e35a25c2cb76f68c840b -ubiqube/msa2-es-ilm:sha-892f159577edf57329d4bf843cdc3d6a5c8199db +ubiqube/msa2-es-ilm:sha-e1b9952e7b8a397a603d1159626ef9741c6f2941 -ubiqube/msa2-front:sha-47144de93d5c83b9926931b86e71b3afa041bd31 +ubiqube/msa2-front:sha-8e68a1259eee188f514534d860cbd2332f53cb7b -ubiqube/msa2-es:sha-600197f92e1e7d80f93ed8e7ee579dbb2d0459ed +ubiqube/msa2-es:sha-ed8a4af34257776376c4429e8a9a5d6d84fdcca2 -ubiqube/msa2-kibana:sha-dd084941368ff50d48c71340fab6d05c2282fb03 +ubiqube/msa2-kibana:sha-45e1e850a2ee9316dbfe48f2083e6604ee294df9 -ubiqube/msa2-linuxdev:sha-e3a765f744035ff972556adb388a575337e380c1 +ubiqube/msa2-linuxdev:sha-55c8f5e0d0452ac7c45d4bf2e43bdfd1b62ff2bc -ubiqube/msa2-linuxme:sha-08392b5f84076cbbf8e3aedc0835a2d263a70781 +ubiqube/msa2-linuxme:sha-3cc0ba072e49ec8dfc4003e203373158640d47b9 -ubiqube/msa2-monitor-writer:sha-1f495379d3e517d154551c45cda960c1d04e878a +ubiqube/msa2-monitor-writer:sha-efa9743b295c4cdce519cfd45b76edeebde05f4f -ubiqube/msa2-monitoring:sha-ef9b814467361d9a1858448d89448dcb26ca5028
+ubiqube/msa2-monitoring:sha-3ed8905abee88ca8f9058f341913f704d2db12fc -ubiqube/msa2-opensearch:sha-fc2c1a1975e58c96605f31bf78483f29b1d3db81 +ubiqube/msa2-opensearch:sha-8d70d2b01f1cd3643605126871f8a1a5c8b3d548 -ubiqube/msa2-parse:sha-0d09a8d0ecd2db050b57fd1839e30f3b0cda6762 +ubiqube/msa2-parse:sha-8fb673ecb2951281f18bddac2f3d8e1ffd02df93 ubiqube/msa2-rsyslog:sha-28f31999aaa5fe05728ea44890aabcc21072409c -ubiqube/msa2-sms:sha-105b39809742eedd920ddfc737facb8c8d9f1d4b +ubiqube/msa2-sms:sha-6ce9834f41d43c4e53560d4fd9991fb142f77139 -ubiqube/msa2-snmptrap:sha-a779ce99dd7e8bc38656830e5ecab15a1a7074d7 +ubiqube/msa2-snmptrap:sha-2b8dc239672da2e96165bd4e751b7c84330343ca -ubiqube/msa2-svn:sha-1445c4fa41074acd31f26105035cd158662cd9c2 +ubiqube/msa2-svn:sha-2db53c87bde3bbf4f2fc43c1a84898f415fedd07 -ubiqube/msa2-ui:sha-2dd82e24b1e83cb64877f5e3f0a15b8844457c5c +ubiqube/msa2-ui:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 zaproxy/zap-stable:2.16.1 diff --git a/helm/templates/02-persistentvolumes.yaml b/helm/templates/02-persistentvolumes.yaml index 1220ef81..4af16ca5 100644 --- a/helm/templates/02-persistentvolumes.yaml +++ b/helm/templates/02-persistentvolumes.yaml @@ -112,34 +112,6 @@ spec: --- apiVersion: v1 kind: PersistentVolumeClaim -metadata: - labels: - {{- include "msa.labels" . | nindent 4 }} - name: msa-alarmbulkfiles -spec: - accessModes: - - ReadWriteOnce - {{ if .Values.defaultStorageClassName }}storageClassName: "{{ .Values.defaultStorageClassName }}"{{ end }} - resources: - requests: - storage: {{ .Values.pvcStorageSizes.msaAlarmbulkfiles }} ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - {{- include "msa.labels" . | nindent 4 }} - name: msa-alarmbulkfiles-err -spec: - accessModes: - - ReadWriteOnce - {{ if .Values.defaultStorageClassName }}storageClassName: "{{ .Values.defaultStorageClassName }}"{{ end }} - resources: - requests: - storage: {{ .Values.pvcStorageSizes.msaAlarmbulkfilesErr }} ---- -apiVersion: v1 -kind: PersistentVolumeClaim metadata: labels: {{- include "msa.labels" . | nindent 4 }} @@ -272,18 +244,6 @@ spec: --- apiVersion: v1 kind: PersistentVolumeClaim -metadata: - name: rrd-repository -spec: - accessModes: - - ReadWriteMany - {{ if .Values.sharedStorageClassName }}storageClassName: "{{ .Values.sharedStorageClassName }}"{{ end }} - resources: - requests: - storage: {{ .Values.pvcStorageSizes.rrdRepository }} ---- -apiVersion: v1 -kind: PersistentVolumeClaim metadata: labels: {{- include "msa.labels" . | nindent 4 }} diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl index 1a7623cd..7e22fe41 100644 --- a/helm/templates/_helpers.tpl +++ b/helm/templates/_helpers.tpl @@ -271,6 +271,7 @@ Create a broker URL string. 
{{- if ( .Values.global.artemis.port.core ) -}} {{ (print ":" .Values.global.artemis.port.core) }} {{- end -}} +{{ (print "?consumerWindowSize=0") -}} {{- end -}} {{/* diff --git a/helm/templates/ccla/ccla-scan-env.yaml b/helm/templates/ccla/ccla-scan-env.yaml index 59eff68c..c91b875d 100644 --- a/helm/templates/ccla/ccla-scan-env.yaml +++ b/helm/templates/ccla/ccla-scan-env.yaml @@ -1,4 +1,4 @@ -{{- if .Values.installCCLA -}} +{{- if and .Values.installCCLA .Values.ccla_scan_env.enabled -}} apiVersion: apps/v1 kind: Deployment metadata: diff --git a/helm/templates/ccla/ccla.yaml b/helm/templates/ccla/ccla.yaml index a0fed40d..6c59d9e0 100644 --- a/helm/templates/ccla/ccla.yaml +++ b/helm/templates/ccla/ccla.yaml @@ -35,8 +35,6 @@ spec: value: "https://cloudclapp.com/EndUserLicenceAgreement.html" - name: FEATURE_KIALI_DASHBOARD value: "true" - - name: FEATURE_DEMO_DEPLOYMENT_WIZARD - value: "false" --- apiVersion: v1 @@ -52,22 +50,56 @@ spec: targetPort: 8080 {{- if .Values.global.ingress.enable }} --- +# Ingress for /ccla apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: cloudclapp + name: cloudclapp-ccla-ingress annotations: nginx.ingress.kubernetes.io/rewrite-target: /$2 spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} + rules: + - http: + paths: + - path: /cloudclapp(/|$)(.*) + pathType: Prefix + backend: + service: + name: cloudclapp + port: + number: 8080 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} +--- +# Ingress for /sasemap +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cloudclapp-sasemap-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /sasemap/$2 +spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: - - path: /ccla(/|$)(.*) + - path: /sasemap(/|$)(.*) pathType: Prefix backend: service: name: cloudclapp port: number: 8080 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} {{- end -}} {{- end -}} diff --git a/helm/templates/ccla/key-vault.yaml b/helm/templates/ccla/key-vault.yaml index 0d29f7e8..a67a9916 100644 --- a/helm/templates/ccla/key-vault.yaml +++ b/helm/templates/ccla/key-vault.yaml @@ -1,4 +1,4 @@ -{{- if .Values.installCCLA -}} +{{- if and .Values.installCCLA .Values.key_vault.enabled -}} apiVersion: apps/v1 kind: Deployment metadata: diff --git a/helm/templates/msa-api.yaml b/helm/templates/msa-api.yaml index 87ed109e..78e6f8b2 100755 --- a/helm/templates/msa-api.yaml +++ b/helm/templates/msa-api.yaml @@ -36,6 +36,8 @@ spec: name: msa-api-logs containers: - env: + - name: _JAVA_OPTIONS + value: "-XX:MaxRAM=8g" {{- if .Values.msa_zipkin.enable }} - name: MANAGEMENT_TRACING_ENABLED value: "true" @@ -72,7 +74,16 @@ spec: secretKeyRef: name: msa-oidc-secret key: workflowClientSecret +{{- if .Values.msa_api.env }} + {{- toYaml .Values.msa_api.env | nindent 12 }} +{{- end }} {{- include "msa.setSpringBootJdbcUrl" (list . 
"POSTGRESQL?ApplicationName=msa-api") | nindent 12 }} +{{- if .Values.msa_zipkin.enabled }} + - name: MANAGEMENT_TRACING_ENABLED + value: "true" + - name: MANAGEMENT_ZIPKIN_TRACING_ENDPOINT + value: http://msa-zipkin:9411/api/v2/spans +{{ end }} image: "{{ .Values.msa_api.image }}" imagePullPolicy: {{ .Values.imagePullPolicy }} livenessProbe: @@ -99,8 +110,6 @@ spec: name: msa-entities - mountPath: /opt/fmc_repository name: msa-repository - - mountPath: /opt/rrd - name: rrd-repository - mountPath: /opt/wildfly/logs name: msa-api-logs - mountPath: /etc/pki/jentreprise @@ -115,9 +124,6 @@ spec: - name: msa-repository persistentVolumeClaim: claimName: msa-repository - - name: rrd-repository - persistentVolumeClaim: - claimName: rrd-repository - name: msa-api-logs persistentVolumeClaim: claimName: msa-api-logs @@ -149,6 +155,10 @@ kind: Ingress metadata: name: msa-api-ingress spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: @@ -159,6 +169,9 @@ spec: name: msa-api port: number: 8480 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} --- apiVersion: networking.k8s.io/v1 kind: Ingress @@ -169,6 +182,10 @@ metadata: nginx.ingress.kubernetes.io/enable-access-log: "false" name: msa-api-ping-ingress spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: @@ -179,12 +196,19 @@ spec: name: msa-api port: number: 8480 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} --- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: msa-api-swagger-config-ingress spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: @@ -195,7 +219,9 @@ spec: name: msa-api port: number: 8480 ---- +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} --- apiVersion: networking.k8s.io/v1 kind: Ingress @@ -205,6 +231,10 @@ metadata: nginx.ingress.kubernetes.io/rewrite-target: /swagger-ui/$1 name: msa-api-swagger-ingress spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: @@ -215,4 +245,27 @@ spec: name: msa-api port: number: 8480 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} {{- end -}} +{{- if .Values.monitoring.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: msa-api + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + service: msa-api + endpoints: + - port: "8480" + interval: 30s + path: /actuator/prometheus + scheme: http + namespaceSelector: + any: true +{{- end }} \ No newline at end of file diff --git a/helm/templates/msa-auth.yaml b/helm/templates/msa-auth.yaml index f1294b39..b99e4ee5 100644 --- a/helm/templates/msa-auth.yaml +++ b/helm/templates/msa-auth.yaml @@ -55,13 +55,19 @@ spec: ports: - containerPort: 8080 livenessProbe: - exec: - command: - - sh - - -cs - - timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' + httpGet: + path: /auth/health/live + port: 9000 + scheme: HTTPS initialDelaySeconds: 60 periodSeconds: 60 + readinessProbe: + httpGet: + path: /auth/health/ready + port: 9000 + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 10 --- apiVersion: v1 kind: Service @@ -77,13 +83,41 @@ spec: targetPort: 8080 selector: app: msa-auth 
+{{- if .Values.monitoring.enabled }} --- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: msa-auth + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + service: msa-auth + endpoints: + - port: "8080" + interval: 30s + path: /auth/metrics + scheme: http + namespaceSelector: + any: true +{{- end }} {{ if .Values.global.ingress.enable -}} +--- apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: msa-auth-ingress + annotations: + nginx.ingress.kubernetes.io/proxy-buffers-number: "4" + nginx.ingress.kubernetes.io/proxy-buffer-size: "256k" + nginx.ingress.kubernetes.io/proxy-busy-buffers-size: "256k" spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: @@ -94,4 +128,7 @@ spec: name: msa-auth port: number: 8080 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} {{- end -}} \ No newline at end of file diff --git a/helm/templates/msa-broker.yaml b/helm/templates/msa-broker.yaml index e952659a..7152057b 100644 --- a/helm/templates/msa-broker.yaml +++ b/helm/templates/msa-broker.yaml @@ -63,6 +63,27 @@ spec: containerPort: 5672 - containerPort: 1883 - containerPort: 61613 + volumeMounts: + - name: msa-broker-data + mountPath: /var/lib/artemis-instance/ + volumes: + - name: msa-broker-data + persistentVolumeClaim: + claimName: msa-broker +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + {{- include "msa.labels" . | nindent 4 }} + name: msa-broker +spec: + accessModes: + - ReadWriteOnce + {{ if .Values.defaultStorageClassName }}storageClassName: "{{ .Values.defaultStorageClassName }}"{{ end }} + resources: + requests: + storage: {{ .Values.pvcStorageSizes.msaBroker }} --- apiVersion: v1 kind: Service @@ -107,4 +128,24 @@ spec: size: 2 persistenceEnabled: true messageMigration: true -{{- end }} \ No newline at end of file +{{- end }} +{{- if .Values.monitoring.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: msa-broker + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + service: msa-broker + endpoints: + - port: "8161" + interval: 30s + path: /metrics/ + scheme: http + namespaceSelector: + any: true +{{- end }} diff --git a/helm/templates/msa-camunda.yaml b/helm/templates/msa-camunda.yaml index 48df13f6..d3f54712 100644 --- a/helm/templates/msa-camunda.yaml +++ b/helm/templates/msa-camunda.yaml @@ -23,7 +23,9 @@ spec: {{- include "msa.initContainerWaitForDbUpdater" . 
| nindent 8 }} containers: - env: -{{- if .Values.msa_zipkin.enable }} + - name: _JAVA_OPTIONS + value: "-XX:MaxRAM=2g" +{{- if .Values.msa_zipkin.enabled }} - name: MANAGEMENT_TRACING_ENABLED value: "true" {{- end }} @@ -65,3 +67,23 @@ spec: targetPort: 8080 selector: app: msa-camunda +{{- if .Values.monitoring.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: msa-camunda + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + service: msa-camunda + endpoints: + - port: "8080" + interval: 30s + path: /actuator/prometheus + scheme: http + namespaceSelector: + any: true +{{- end }} \ No newline at end of file diff --git a/helm/templates/msa-cerebro.yaml b/helm/templates/msa-cerebro.yaml index 475d4df8..13e4ff67 100644 --- a/helm/templates/msa-cerebro.yaml +++ b/helm/templates/msa-cerebro.yaml @@ -63,4 +63,25 @@ spec: targetPort: 9000 selector: app: msa-cerebro +--- +{{- if .Values.global.ingress.enable }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: msa-cerebro-ingress + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + rules: + - http: + paths: + - path: /cerebro(/|$)(.*) + pathType: ImplementationSpecific + backend: + service: + name: msa-cerebro + port: + number: 9000 +{{- end }} {{- end }} \ No newline at end of file diff --git a/helm/templates/msa-db-updater.yaml b/helm/templates/msa-db-updater.yaml index c7394fed..c23c66ab 100644 --- a/helm/templates/msa-db-updater.yaml +++ b/helm/templates/msa-db-updater.yaml @@ -1,4 +1,4 @@ -{{ if eq .Values.global.database.mode "cnpg" }} +{{ if eq .Values.global.database.mode "operator" }} --- apiVersion: apps/v1 kind: Deployment diff --git a/helm/templates/msa-db.yaml b/helm/templates/msa-db.yaml index a0dce59f..3ebee56f 100644 --- a/helm/templates/msa-db.yaml +++ b/helm/templates/msa-db.yaml @@ -37,6 +37,7 @@ spec: - mountPath: /var/lib/pgsql/17/data subPath: pgsql name: msa-db +{{- if .Values.msa_db_upgrader.enabled }} - name: postgresql-upgrader env: - name: PGAUTO_ONESHOT @@ -52,6 +53,7 @@ spec: - mountPath: /var/lib/postgresql/data subPath: pgsql name: msa-db +{{- end }} containers: - env: - name: MAX_CONNECTIONS @@ -80,12 +82,39 @@ spec: name: msa-db ports: - containerPort: 5432 + {{- if .Values.monitoring.enabled }} + - name: metrics + containerPort: 9187 + {{- end }} resources: {{- toYaml .Values.msa_db.resources | nindent 12 }} volumeMounts: - mountPath: /var/lib/pgsql/17/data subPath: pgsql name: msa-db + {{- if .Values.monitoring.enabled }} + - name: postgres-exporter + image: {{ .Values.monitoring.postgresExporter.image | default "quay.io/prometheuscommunity/postgres-exporter:v0.15.0" }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + env: + - name: DATA_SOURCE_URI + value: "localhost:5432/postgres?sslmode=disable" + - name: DATA_SOURCE_USER + valueFrom: + secretKeyRef: + name: msa-db-secret + key: username + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: msa-db-secret + key: password + ports: + - containerPort: 9187 + name: metrics + resources: + {{- toYaml .Values.monitoring.postgresExporter.resources | nindent 12 }} + {{- end }} restartPolicy: Always volumes: - name: msa-db @@ -104,8 +133,33 @@ spec: - name: "5432" port: 5432 targetPort: 5432 + {{- if .Values.monitoring.enabled }} + - name: metrics + port: 9187 + targetPort: 9187 + {{- end }} selector: app: msa-db +{{- if and (eq .Values.global.database.type "postgresql") (ne .Values.global.database.mode 
"operator") .Values.monitoring.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: msa-db + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + service: msa-db + endpoints: + - port: metrics + interval: 30s + path: /metrics + scheme: http + namespaceSelector: + any: true +{{- end }} {{- end -}} {{ if eq .Values.global.database.mode "operator" }} diff --git a/helm/templates/msa-dev.yaml b/helm/templates/msa-dev.yaml index 9849ef0c..4dbee611 100644 --- a/helm/templates/msa-dev.yaml +++ b/helm/templates/msa-dev.yaml @@ -61,6 +61,9 @@ spec: command: [ "/docker-entrypoint.sh"] args: ["--cloudclapp" ] {{ end }} + env: + - name: ENABLE_K8S + value: "true" livenessProbe: exec: command: diff --git a/helm/templates/msa-es.yaml b/helm/templates/msa-es.yaml index 38735dd0..06592bfd 100644 --- a/helm/templates/msa-es.yaml +++ b/helm/templates/msa-es.yaml @@ -42,6 +42,14 @@ spec: value: single-node - name: script.painless.regex.enabled value: "true" +{{- if .Values.msa_cerebro.enabled }} + - name: "http.cors.enabled" + value: "true" + - name: "http.cors.allow-origin" + value: "*" + - name: "http.cors.allow-headers" + value: "X-Requested-With, Content-Type, Content-Length, Authorization" +{{- end }} image: "{{ .Values.msa_es.image }}" imagePullPolicy: {{ .Values.imagePullPolicy }} #livenessProbe: diff --git a/helm/templates/msa-front.yaml b/helm/templates/msa-front.yaml index 9acbc6f4..db9692a7 100644 --- a/helm/templates/msa-front.yaml +++ b/helm/templates/msa-front.yaml @@ -115,11 +115,18 @@ metadata: labels: service: msa-front-lb {{- include "msa.labels" . | nindent 4 }} + {{- if .Values.msa_front.metallb}} + annotations: + metallb.io/allow-shared-ip: "share-single-ip" + {{- end }} name: msa-front-lb spec: type: LoadBalancer - externalTrafficPolicy: Local - {{- if and .Values.msa_front.externalIPs }} + externalTrafficPolicy: Cluster + {{- if .Values.msa_front.metallb}} + loadBalancerIP: {{ index .Values.msa_front.externalIPs 0 }} + {{- end }} + {{- if and (not .Values.msa_front.metallb) .Values.msa_front.externalIPs }} externalIPs: {{- toYaml .Values.msa_front.externalIPs | nindent 4 }} {{- end }} @@ -145,6 +152,10 @@ metadata: nginx.ingress.kubernetes.io/ssl-redirect: "true" name: msa-ui-index-ingress spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: @@ -176,6 +187,9 @@ spec: name: msa-front port: number: 443 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} {{- end }} --- {{- if .Values.openshift.enabled }} diff --git a/helm/templates/msa-kibana.yaml b/helm/templates/msa-kibana.yaml index 65d9bb03..95bd6aa2 100644 --- a/helm/templates/msa-kibana.yaml +++ b/helm/templates/msa-kibana.yaml @@ -73,6 +73,10 @@ kind: Ingress metadata: name: msa-kibana-ingress spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: paths: @@ -83,4 +87,7 @@ spec: name: msa-kibana port: number: 5601 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} {{- end -}} \ No newline at end of file diff --git a/helm/templates/msa-monitor-writer.yaml b/helm/templates/msa-monitor-writer.yaml index 263cd93e..d39a6d80 100644 --- a/helm/templates/msa-monitor-writer.yaml +++ b/helm/templates/msa-monitor-writer.yaml @@ -21,6 +21,18 @@ spec: {{- end }} containers: - name: msa-monitor-writer + env: + - name: _JAVA_OPTIONS + value: 
"-XX:MaxRAM=1g" + - name: SPRING_ARTEMIS_BROKER_URL + value: {{ include "msa.buildBrokerUrl" . }} +{{- if .Values.msa_zipkin.enabled }} + - name: MANAGEMENT_TRACING_ENABLED + value: "true" + - name: MANAGEMENT_ZIPKIN_TRACING_ENDPOINT + value: http://msa-zipkin:9411/api/v2/spans +{{ end }} + image: "{{ .Values.msa_monitor_writer.image }}" imagePullPolicy: {{ .Values.imagePullPolicy }} ports: @@ -35,7 +47,39 @@ spec: path: /mon-writer/actuator/health/readiness port: 9099 initialDelaySeconds: 30 - {{- if not .Values.disableResourceConstraints }} - resources: - {{- toYaml .Values.msa_api.resources | nindent 12 }} - {{- end }} \ No newline at end of file +--- +apiVersion: v1 +kind: Service +metadata: + labels: + service: msa-monitor-writer + {{- include "msa.labels" . | nindent 4 }} + name: msa-monitor-writer +spec: + ports: + - name: "9099" + port: 9099 + targetPort: 9099 + selector: + app: msa-monitor-writer + +{{- if .Values.monitoring.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: msa-monitor-writer + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + service: msa-monitor-writer + endpoints: + - port: "9099" + interval: 30s + path: /mon-writer/actuator/prometheus + scheme: http + namespaceSelector: + any: true +{{- end }} diff --git a/helm/templates/msa-monitoring.yaml b/helm/templates/msa-monitoring.yaml index 8a5efeea..9ff833dd 100644 --- a/helm/templates/msa-monitoring.yaml +++ b/helm/templates/msa-monitoring.yaml @@ -7,6 +7,8 @@ metadata: name: msa-monitoring spec: replicas: {{ .Values.msa_monitoring.replicas }} + strategy: + type: Recreate selector: matchLabels: app: msa-monitoring @@ -92,8 +94,6 @@ spec: name: msa-entities - mountPath: /opt/fmc_repository name: msa-repository - - mountPath: /opt/rrd - name: rrd-repository - mountPath: /opt/sms/spool/parser name: msa-monitbulkfiles - mountPath: /opt/sms/spool/parser-error @@ -101,6 +101,7 @@ spec: - mountPath: /etc/odbc.ini name: sms-odbc-ini subPath: odbc.ini + restartPolicy: Always volumes: - name: msa-dev persistentVolumeClaim: @@ -111,9 +112,6 @@ spec: - name: msa-repository persistentVolumeClaim: claimName: msa-repository - - name: rrd-repository - persistentVolumeClaim: - claimName: rrd-repository - name: msa-monitbulkfiles persistentVolumeClaim: claimName: msa-monitbulkfiles diff --git a/helm/templates/msa-opensearch-dashboard.yaml b/helm/templates/msa-opensearch-dashboard.yaml index c9eaebb0..b2d9bce1 100644 --- a/helm/templates/msa-opensearch-dashboard.yaml +++ b/helm/templates/msa-opensearch-dashboard.yaml @@ -35,7 +35,7 @@ spec: valueFrom: secretKeyRef: name: elasticsearch-creds - key: paswword + key: password image: "{{ .Values.msa_opensearch_dashboards.image }}" imagePullPolicy: {{ .Values.imagePullPolicy }} livenessProbe: @@ -66,4 +66,36 @@ spec: targetPort: 5601 selector: app: msa-open-search-dashboards +{{- if .Values.global.ingress.enable }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: msa-opensearch-ingress + annotations: + nginx.ingress.kubernetes.io/proxy-buffer-size: "16k" + nginx.ingress.kubernetes.io/proxy-buffers-number: "4" + nginx.ingress.kubernetes.io/proxy-busy-buffers-size: "64k" + labels: + service: msa-kibana + {{- include "msa.labels" . 
diff --git a/helm/templates/msa-opensearch-dashboard.yaml index c9eaebb0..b2d9bce1 100644 --- a/helm/templates/msa-opensearch-dashboard.yaml +++ b/helm/templates/msa-opensearch-dashboard.yaml @@ -35,7 +35,7 @@ spec: valueFrom: secretKeyRef: name: elasticsearch-creds - key: paswword + key: password image: "{{ .Values.msa_opensearch_dashboards.image }}" imagePullPolicy: {{ .Values.imagePullPolicy }} livenessProbe: @@ -66,4 +66,36 @@ spec: targetPort: 5601 selector: app: msa-open-search-dashboards +{{- if .Values.global.ingress.enable }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: msa-opensearch-ingress + annotations: + nginx.ingress.kubernetes.io/proxy-buffer-size: "16k" + nginx.ingress.kubernetes.io/proxy-buffers-number: "4" + nginx.ingress.kubernetes.io/proxy-busy-buffers-size: "64k" + labels: + service: msa-kibana + {{- include "msa.labels" . | nindent 4 }} +spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} + rules: + - http: + paths: + - path: /kibana + pathType: Prefix + backend: + service: + name: msa-kibana + port: + number: 5601 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} +{{- end -}} {{- end -}} \ No newline at end of file diff --git a/helm/templates/msa-opensearch.yaml index 0c244310..d2f9ba5e 100644 --- a/helm/templates/msa-opensearch.yaml +++ b/helm/templates/msa-opensearch.yaml @@ -51,10 +51,17 @@ spec: volumeMounts: - mountPath: /usr/share/elasticsearch/data name: msa-opensearch - volumes: - - name: msa-opensearch - persistentVolumeClaim: - claimName: msa-opensearch + # No need for volumes here, handled by volumeClaimTemplates + volumeClaimTemplates: + - metadata: + name: msa-opensearch + spec: + accessModes: + - ReadWriteOnce + {{ if .Values.defaultStorageClassName }}storageClassName: "{{ .Values.defaultStorageClassName }}"{{ end }} + resources: + requests: + storage: {{ .Values.pvcStorageSizes.msaOpensearch }} --- apiVersion: v1 kind: Service diff --git a/helm/templates/msa-parse.yaml index f71aa8e0..4c6c4891 100644 --- a/helm/templates/msa-parse.yaml +++ b/helm/templates/msa-parse.yaml @@ -8,6 +8,8 @@ metadata: name: msa-parse spec: replicas: {{ .Values.msa_rsyslog.replicas }} + strategy: + type: Recreate selector: matchLabels: app: msa-parse @@ -90,6 +92,7 @@ spec: - mountPath: /etc/odbc.ini name: sms-odbc-ini subPath: odbc.ini + restartPolicy: Always volumes: - name: msa-dev persistentVolumeClaim: diff --git a/helm/templates/msa-rsyslog.yaml index c66c51df..ef37855f 100644 --- a/helm/templates/msa-rsyslog.yaml +++ b/helm/templates/msa-rsyslog.yaml @@ -7,6 +7,8 @@ metadata: name: msa-rsyslog-and-snmptrap spec: replicas: {{ .Values.msa_rsyslog.replicas }} + strategy: + type: Recreate selector: matchLabels: app: msa-rsyslog-and-snmptrap @@ -111,6 +113,7 @@ spec: - mountPath: /etc/odbc.ini name: sms-odbc-ini subPath: odbc.ini + restartPolicy: Always volumes: - name: msa-dev persistentVolumeClaim: @@ -161,11 +164,18 @@ metadata: labels: service: msa-rsyslog-lb {{- include "msa.labels" . | nindent 4 }} + {{- if .Values.msa_rsyslog.metallb}} + annotations: + metallb.io/allow-shared-ip: "share-single-ip" + {{- end }} name: msa-rsyslog-lb spec: type: LoadBalancer - externalTrafficPolicy: Local - {{- if and .Values.msa_rsyslog.externalIPs}} + externalTrafficPolicy: Cluster + {{- if .Values.msa_rsyslog.metallb}} + loadBalancerIP: {{ index .Values.msa_front.externalIPs 0 }} + {{- end }} + {{- if and (not .Values.msa_rsyslog.metallb) .Values.msa_rsyslog.externalIPs }} externalIPs: {{- toYaml .Values.msa_rsyslog.externalIPs | nindent 4 }} {{- end }}
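# Note on the MetalLB branch above: with the same metallb.io/allow-shared-ip key on
# msa-front-lb and msa-rsyslog-lb, MetalLB can assign both Services the single address
# taken from msa_front.externalIPs (IP sharing is allowed when the sharing key matches,
# the ports differ, and, as here, externalTrafficPolicy is Cluster). A minimal values
# sketch; the address itself is illustrative, not from this patch:
#   msa_front:
#     metallb: true
#     externalIPs:
#       - "192.0.2.10"
#   msa_rsyslog:
#     metallb: true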
diff --git a/helm/templates/msa-secrets.yaml index 9892247a..1e5718f5 100644 --- a/helm/templates/msa-secrets.yaml +++ b/helm/templates/msa-secrets.yaml @@ -9,7 +9,7 @@ metadata: type: Opaque stringData: es_credentials: {{ include "msa.esBasicAuth" . }} - paswword: {{ .Values.global.es.credentials.password }} + password: {{ .Values.global.es.credentials.password }} username: {{ .Values.global.es.credentials.username }} --- apiVersion: v1 diff --git a/helm/templates/msa-smtp.yaml index 6ca3656c..bb06e6c0 100644 --- a/helm/templates/msa-smtp.yaml +++ b/helm/templates/msa-smtp.yaml @@ -25,23 +25,15 @@ spec: containers: - image: "{{ .Values.msa_smtp.image }}" imagePullPolicy: {{ .Values.imagePullPolicy }} - env: - - name: ALLOWED_SENDER_DOMAINS - value: "" - - name: ALLOW_EMPTY_SENDER_DOMAINS - value: "true" - - name: HOSTNAME - value: "msa-smtp" - - name: POSTFIX_message_size_limit - value: "2097152" - # uncomment the two lines below to configure a relay host (Mail Transfer Agent) - #- name: RELAYHOST - # value: :25 name: msa-smtp livenessProbe: exec: command: - - /scripts/healthcheck.sh + - timeout + - "10s" + - bash + - -c + - ':> /dev/tcp/127.0.0.1/25' timeoutSeconds: 10 periodSeconds: 10 successThreshold: 1 @@ -49,7 +41,11 @@ spec: readinessProbe: exec: command: - - /scripts/healthcheck.sh + - timeout + - "10s" + - bash + - -c + - ':> /dev/tcp/127.0.0.1/25' timeoutSeconds: 10 periodSeconds: 10 successThreshold: 1 diff --git a/helm/templates/msa-ui.yaml index a712f8f6..bd54671e 100644 --- a/helm/templates/msa-ui.yaml +++ b/helm/templates/msa-ui.yaml @@ -55,11 +55,17 @@ spec: selector: app: msa-ui --- -{{- if .Values.global.ingress.enable -}} +{{ if .Values.global.ingress.enable -}} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: + annotations: + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" name: msa-ui-ingress spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} rules: - http: @@ -71,4 +77,7 @@ spec: name: msa-ui port: number: 8080 -{{- end -}} \ No newline at end of file +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} +{{- end -}} diff --git a/helm/templates/msa-zipkin.yaml index 9e87edcd..b49fd39c 100644 --- a/helm/templates/msa-zipkin.yaml +++ b/helm/templates/msa-zipkin.yaml @@ -48,4 +48,24 @@ spec: targetPort: 9411 selector: app: msa-zipkin -{{- end }} \ No newline at end of file +{{- end }} +{{- if .Values.monitoring.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: msa-zipkin + labels: + release: prometheus-operator +spec: + selector: + matchLabels: + app: msa-zipkin + endpoints: + - port: "9411" + interval: 30s + path: /prometheus + scheme: http + namespaceSelector: + any: true +{{- end }} diff --git a/helm/templates/version-configmap.yaml index c973f32c..1ddf9e1b 100644 --- a/helm/templates/version-configmap.yaml +++ b/helm/templates/version-configmap.yaml @@ -3,4 +3,4 @@ kind: ConfigMap metadata: name: version-configmap data: - index.html: '{"jira_version":"3.2.1","msa_version":"3.2.1","ccla_version":"1.2.1","build":"392"}' + index.html: '{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"658"}' diff --git a/helm/values.public.yaml index e2d4a31e..ae6093cc 100644 --- a/helm/values.public.yaml +++ b/helm/values.public.yaml @@ -53,7 +53,7 @@ msa_snmptrap: image: openmsa/openmsa:msa2-snmptrap-2.8.11-dee5c28ec7481c77cac4bbdf54ad94d36217072c msa_smtp: - image: boky/postfix:4.2.1-alpine + image: ubiqube/msa2-smtp:sha-a4148156deefcfd8d84582e75b9ecff39d7eee24 msa_ui: image:
openmsa/openmsa:msa2-ui-2.8.11-a639770d54ab742d0bf8febc84e4377a463e9749 diff --git a/helm/values.yaml b/helm/values.yaml index e729788c..315462d5 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -59,12 +59,17 @@ global: tokenUrl: http://msa-auth:8080/auth/realms/msa/protocol/openid-connect/token workflowClientId: msa-workflow workflowClientSecret: 00s8GzqcjRQ7u9c9y7sP8im4FWGQUvDC + # tls: + # - hosts: + # - "msa.ubiqube.com" + # secretName: msa-tls create_db: true #set to false, to not deploy the db pvcStorageSizes: msaApiKeystore: 100Mi msaApiLogs: 100Mi + msaBroker: 100Mi msaBulkfiles: 50Mi msaBulkfilesErr: 50Mi msaParsebulkfiles: 50Mi @@ -83,14 +88,13 @@ pvcStorageSizes: msaRepository: 3Gi msaSvn: 100Mi msaDb: 5Gi - rrdRepository: 500Mi msaMongodb: 1Gi msa_adapters: - image: ubiqube/msa2-adapters:sha-46656a10b040efcb6783d0cb18917cdcfc5e74fa + image: ubiqube/msa2-adapters:sha-7272142683783e432f8834b8ab64a089abbe41a0 msa_alarm: - image: ubiqube/msa2-alarm:sha-9b99c5171107203ae28c5f9f501371c46d244c2b + image: ubiqube/msa2-alarm:sha-948779a57daf0378d645693404181162a5a2fb6b replicas: 1 resources: requests: @@ -99,7 +103,7 @@ msa_alarm: ephemeral-storage: "100Mi" msa_api: - image: ubiqube/msa2-api:sha-520736a182a77911c62dd5785decad9235003143 + image: ubiqube/msa2-api:sha-932b5e70acb87524e4fa1670e75fed08b6ddbb59 replicas: 1 resources: limits: @@ -111,7 +115,7 @@ msa_api: ephemeral-storage: "100Mi" msa_auth: - image: ubiqube/msa2-auth:sha-583e0c3fca70d231b1db52e07c7d70d5f1b7dd29 + image: ubiqube/msa2-auth:sha-aa48205a5ece128029a1162f74640f23f70ad6d8 replicas: 1 admin_user: username: admin @@ -123,7 +127,7 @@ msa_auth: ephemeral-storage: "100Mi" msa_broker: - image: ubiqube/msa2-broker:sha-f610df08d8c9df52148ed6c85a82c5db1d16cc1d + image: ubiqube/msa2-broker:sha-7090331f69a90a4d339072bd52b6b4b43744459c mode: local replicas: 1 resources: @@ -133,7 +137,7 @@ msa_broker: ephemeral-storage: "50Mi" msa_bud: - image: ubiqube/msa2-bud:sha-6770e77371f2710851600a411f455561a14385e7 + image: ubiqube/msa2-bud:sha-3a6eed07bcea3a1a18d29ad14e7df5b422cbba91 replicas: 1 resources: requests: @@ -142,7 +146,7 @@ msa_bud: ephemeral-storage: "100Mi" msa_camunda: - image: ubiqube/msa2-camunda:sha-0521c708e11ff65f3912e92d5008af81d5661a52 + image: ubiqube/msa2-camunda:sha-f9c9e1e4e3cbd3d9bb868048a65f45e5f4fc807c replicas: 1 resources: requests: @@ -152,7 +156,7 @@ msa_camunda: msa_cerebro: enabled: false - image: ubiqube/msa2-cerebro:sha-c1f46f535cf853d40ee4e11f536740ac7332a0d7 + image: ubiqube/msa2-cerebro:sha-0142d25b3f4573e92162319f95142c141c302216 replicas: 1 resources: requests: @@ -163,7 +167,7 @@ msa_cerebro: msa_db: ui: enabled: false - image: ubiqube/msa2-db:sha-0eaaedf0ece58ecb0eb03a456b1de7d970e7f0e3 + image: ubiqube/msa2-db:sha-336ff86171aa49922743e35a25c2cb76f68c840b hostPath: /shared/database upgrader_image: ubiqube/dev:msa-pg-upgrader resources: @@ -177,16 +181,14 @@ msa_db: # parameters: # barmanObjectName: minio-eu -msa_db_updater: - image: ubiqube/msa2-db-updater:sha-0a6e91d4d8813eebe4a971e13c6848765a38b79b - msa_db_upgrader: + enabled: false image: ubiqube/dev:msa-pg-upgrader create_maria_db: false # Deploy Maria DB instead of postgres msa_dev: - image: ubiqube/msa2-linuxdev:sha-e3a765f744035ff972556adb388a575337e380c1 + image: ubiqube/msa2-linuxdev:sha-55c8f5e0d0452ac7c45d4bf2e43bdfd1b62ff2bc replicas: 1 resources: requests: @@ -196,7 +198,7 @@ msa_dev: msa_es: enabled: true - image: ubiqube/msa2-es:sha-600197f92e1e7d80f93ed8e7ee579dbb2d0459ed + image: 
ubiqube/msa2-es:sha-ed8a4af34257776376c4429e8a9a5d6d84fdcca2 replicas: 1 resources: requests: @@ -206,7 +208,7 @@ msa_es: msa_es_ilm: enabled: true - image: ubiqube/msa2-es-ilm:sha-892f159577edf57329d4bf843cdc3d6a5c8199db + image: ubiqube/msa2-es-ilm:sha-e1b9952e7b8a397a603d1159626ef9741c6f2941 replicas: 1 resources: requests: @@ -216,9 +218,9 @@ msa_es_ilm: msa_front: externalIPs: null - ingress: false # When set to true, requires a Ingress Controller - loadbalancer: true - image: ubiqube/msa2-front:sha-47144de93d5c83b9926931b86e71b3afa041bd31 + ingress: true + loadbalancer: false + image: ubiqube/msa2-front:sha-8e68a1259eee188f514534d860cbd2332f53cb7b replicas: 1 resources: requests: @@ -227,7 +229,7 @@ msa_front: msa_kibana: enabled: true - image: ubiqube/msa2-kibana:sha-dd084941368ff50d48c71340fab6d05c2282fb03 + image: ubiqube/msa2-kibana:sha-45e1e850a2ee9316dbfe48f2083e6604ee294df9 replicas: 1 resources: requests: @@ -235,11 +237,11 @@ msa_kibana: memory: "600Mi" msa_kibana_wf: - image: ubiqube/msa2-wf-kibana:sha-b841783251a9a44ef24be79cf00e157cde9c146c + image: ubiqube/msa2-wf-kibana:sha-5b183956d2bf2d62423e7ef059e3af35639aad6a msa_linuxme: enabled: false - image: ubiqube/msa2-linuxme:sha-08392b5f84076cbbf8e3aedc0835a2d263a70781 + image: ubiqube/msa2-linuxme:sha-3cc0ba072e49ec8dfc4003e203373158640d47b9 replicas: 1 resources: requests: @@ -248,11 +250,11 @@ msa_linuxme: memory: "20Mi" msa_mariadb: - image: docker.io/ubiqube/msa2-mariadb:sha-bf833c7d14957bf03013f1798dc4919cd84ba1b9 + image: docker.io/ubiqube/msa2-mariadb:sha-f2fb3a804060df12cbece26b2485b1f7934c63c7 msa_mongodb: enabled: true - image: ubiqube/msa2-mongodb:sha-a6b1c7ba0d634f0deed06dca5ef893bea32f2803 + image: ubiqube/msa2-mongodb:sha-1b40c09ed18524e2e0561e139537d73f635b69a3 hostPath: /shared/database mode: local resources: @@ -262,13 +264,13 @@ msa_mongodb: memory: "200Mi" msa_monitor_writer: - image: ubiqube/msa2-monitor-writer:sha-1f495379d3e517d154551c45cda960c1d04e878a + image: ubiqube/msa2-monitor-writer:sha-efa9743b295c4cdce519cfd45b76edeebde05f4f replicas: 1 msa_monitoring: - image: ubiqube/msa2-monitoring:sha-ef9b814467361d9a1858448d89448dcb26ca5028 + image: ubiqube/msa2-monitoring:sha-3ed8905abee88ca8f9058f341913f704d2db12fc parameters: - amqp_address: syslogs + amqp_address: "core-engine.monitoring" # One of: RRD, ES, AMQP target_mon: AMQP replicas: 1 @@ -278,9 +280,12 @@ msa_monitoring: cpu: "100m" ephemeral-storage: "100Mi" +msa_ms_inventory_management_installer: + image: ubiqube/msa2-ms-inventory-management:sha-b28ae96821300678158d3cc5ff2917be559e14de + msa_opensearch: enabled: false - image: ubiqube/msa2-opensearch:sha-fc2c1a1975e58c96605f31bf78483f29b1d3db81 + image: ubiqube/msa2-opensearch:sha-8d70d2b01f1cd3643605126871f8a1a5c8b3d548 replicas: 1 resources: requests: @@ -290,7 +295,7 @@ msa_opensearch: msa_opensearch_dashboards: enabled: false - image: opensearchproject/opensearch-dashboards:1.3.20 + image: ubiqube/msa2-opensearch-dashboard:sha-45e1e850a2ee9316dbfe48f2083e6604ee294df9 replicas: 1 resources: requests: @@ -298,7 +303,7 @@ msa_opensearch_dashboards: memory: "600Mi" msa_parse: - image: ubiqube/msa2-parse:sha-0d09a8d0ecd2db050b57fd1839e30f3b0cda6762 + image: ubiqube/msa2-parse:sha-8fb673ecb2951281f18bddac2f3d8e1ffd02df93 replicas: 1 resources: requests: @@ -320,7 +325,7 @@ msa_rsyslog: ephemeral-storage: "100Mi" msa_sms: - image: ubiqube/msa2-sms:sha-105b39809742eedd920ddfc737facb8c8d9f1d4b + image: ubiqube/msa2-sms:sha-6ce9834f41d43c4e53560d4fd9991fb142f77139 replicas: 1 resources: requests: 
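# For reference: the tls:/host: stanzas added to the ingress templates in this patch
# (msa-front, msa-kibana, msa-opensearch-dashboard, msa-ui) all read .Values.global.tls
# and pin host: to the first host of the first entry. A minimal sketch matching the
# commented example added near the top of this values.yaml diff (hostname and secret
# name are illustrative):
#   global:
#     tls:
#       - hosts:
#           - "msa.ubiqube.com"
#         secretName: msa-tls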
@@ -329,7 +334,7 @@ msa_sms: ephemeral-storage: "100Mi" msa_smtp: - image: boky/postfix:4.2.1-alpine + image: ubiqube/msa2-smtp:sha-4b49c82383b22067be87ff969ae87ccf7a1f1f51 replicas: 1 resources: requests: @@ -338,7 +343,7 @@ msa_smtp: ephemeral-storage: "50Mi" msa_snmptrap: - image: ubiqube/msa2-snmptrap:sha-a779ce99dd7e8bc38656830e5ecab15a1a7074d7 + image: ubiqube/msa2-snmptrap:sha-2b8dc239672da2e96165bd4e751b7c84330343ca replicas: 1 resources: requests: @@ -347,7 +352,7 @@ msa_snmptrap: ephemeral-storage: "100Mi" msa_svn: - image: ubiqube/msa2-svn:sha-1445c4fa41074acd31f26105035cd158662cd9c2 + image: ubiqube/msa2-svn:sha-2db53c87bde3bbf4f2fc43c1a84898f415fedd07 replicas: 1 resources: requests: @@ -359,10 +364,10 @@ msa_svn: password: UB1s5n msa_topology_wf: - image: docker.io/ubiqube/msa2-wf-topology:sha-a6768eccdd9df9df6f05e9f262233009aaa4d76a + image: docker.io/ubiqube/msa2-wf-topology:sha-123d7c227159dbf3550cd1e2a860728b29d91800 msa-tutorial-wf: - image: docker.io/ubiqube/msa2-wf-tutorial:sha-8149ad078b4a52e2a49af8a847ab9310a45bfa5e + image: docker.io/ubiqube/msa2-wf-tutorial:sha-117f4fafb4beb12dd5a2fc94bc1aa55dd21c6f38 msa_ui: env: @@ -396,7 +401,7 @@ msa_ui: value: "true" - name: FEATURE_WORKFLOW_OWNER value: "false" - image: ubiqube/msa2-ui:sha-2dd82e24b1e83cb64877f5e3f0a15b8844457c5c + image: ubiqube/msa2-ui:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 replicas: 1 resources: requests: @@ -404,6 +409,9 @@ msa_ui: cpu: "100m" ephemeral-storage: "50Mi" +msa_workflow_inventory_management_installer: + image: ubiqube/msa2-workflow-inventory-management:sha-9ee147ef8864e85e4c31ba21a8fae9aaaae1ca3d + msa_zipkin: enabled: true image: docker.io/openzipkin/zipkin @@ -436,28 +444,35 @@ openshift: installCCLA: false ccla_blueprint: - image: docker.io/ubiqube/cld-blueprint-installer:sha-b02865c49070478a960e6ac651ba92a4c9c9a043 + image: docker.io/ubiqube/cld-blueprint-installer:sha-1f4dac22a1467dab2daf4c10acdefb820bb315cc ccla_automation: - image: docker.io/ubiqube/cld-automation-installer:sha-e9471cd21943ebba5cb456a6f6708b4cbe079ed4 + image: docker.io/ubiqube/cld-automation-installer:sha-23bcb2379abdb07ea85c0923788dcfcd8f4bae32 cld_cisco_ms: - image: docker.io/ubiqube/cld-cisco-ms-installer:sha-33b9456a23d7892fbb960ec354c4b37aa7438235 + image: docker.io/ubiqube/cld-cisco-ms-installer:sha-4251bc22e2900ef0606f9beb458a073c0b903857 -cld-paloalto-prisma-ms: - image: ubiqube/cld-paloalto-prisma-ms-installer:sha-d71b925a40d4513d4774fcf7028438a56e9ed695 +cld_fortinet_ms_installer: + image: docker.io/ubiqube/cld-fortinet-ms-installer:sha-d62f1dc7524baebee1acc5123e73084d2e382a26 cld_netskope_ms: - image: docker.io/ubiqube/cld-netskope-ms-installer:sha-8e44a05c29b1779a60061dfbf469c3413d02a7cb + image: docker.io/ubiqube/cld-netskope-ms-installer:sha-1f77ef4f95d0d6374729f4a6cf4a8daa024668a7 + +cld_paloalto_ngfw_ms_installer: + image: docker.io/ubiqube/cld-paloalto-ngfw-installer:sha-35368719b7558753f37ea5df16595cdc3e29acc8 + +cld-paloalto-prisma-ms: + image: ubiqube/cld-paloalto-prisma-ms-installer:sha-29e9ec7352d027301226eb137c2179dea60548a8 ccla_wf: - image: docker.io/ubiqube/msa2-ccla-wf-installer:sha-a496e2bd5cd9121f99d5050e99ec2591c58dcfd5 + image: docker.io/ubiqube/msa2-ccla-wf-installer:sha-e06e1cb61a4e53930f602e9af9e4a3131ccec01b ccla_bin: - image: docker.io/ubiqube/msa2-ccla-bin-installer:sha-9cabf0b858edb4593fd652eb2baa684f51bfe427 + enabled: true + image: docker.io/ubiqube/msa2-ccla-bin-installer:sha-926a02fb507f520be7670d51eb6036093afbb6bf ccla: - image: 
ubiqube/cloudclapp:sha-2dd82e24b1e83cb64877f5e3f0a15b8844457c5c + image: ubiqube/cloudclapp:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 replicas: 1 resources: requests: @@ -466,7 +481,7 @@ ccla: ephemeral-storage: "50Mi" opslab: - image: ubiqube/sase-opslab:sha-2dd82e24b1e83cb64877f5e3f0a15b8844457c5c + image: ubiqube/sase-opslab:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 replicas: 1 resources: requests: @@ -475,7 +490,7 @@ opslab: ephemeral-storage: "50Mi" ccla_scan_app: - image: ubiqube/cloudclapp-scan:sha-0c453d7dc6b1fe51db9f444439b8a4271bfff5f4 + image: ubiqube/cloudclapp-scan:sha-d1303b2b66dfdafd870b501cdb91b9c5a1dc6465 replicas: 1 resources: requests: @@ -484,6 +499,7 @@ ccla_scan_app: ephemeral-storage: "50Mi" ccla_scan_env: + enabled: true image: zaproxy/zap-stable:2.16.1 replicas: 1 resources: @@ -493,6 +509,7 @@ ccla_scan_env: ephemeral-storage: "50Mi" key_vault: + enabled: true image: hashicorp/vault:latest replicas: 1 resources: @@ -500,3 +517,29 @@ key_vault: memory: "100Mi" cpu: "100m" ephemeral-storage: "50Mi" + +# Monitoring for msa-db +monitoring: + enabled: false + postgresExporter: + image: quay.io/prometheuscommunity/postgres-exporter:v0.15.0 + resources: + requests: + cpu: "10m" + memory: "32Mi" + ephemeral-storage: "10Mi" + limits: + cpu: "100m" + memory: "128Mi" + ephemeral-storage: "50Mi" + jmxExporter: + image: bitnami/jmx-exporter:0.20.0 + resources: + requests: + cpu: "10m" + memory: "32Mi" + ephemeral-storage: "10Mi" + limits: + cpu: "100m" + memory: "128Mi" + ephemeral-storage: "50Mi" From 92136ce6564b512fb7f7a45b40e895a4edb63d67 Mon Sep 17 00:00:00 2001 From: Antoine Brun Date: Tue, 30 Sep 2025 15:08:08 +0200 Subject: [PATCH 2/5] update ALLOW_PARALLEL_CALLCOMMANDS to true --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 9ff7249a..dac5f1e1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -503,7 +503,7 @@ services: - *db-configuration - *es-configuration - *svn-configuration - ALLOW_PARALLEL_CALLCOMMANDS: false + ALLOW_PARALLEL_CALLCOMMANDS: true healthcheck: interval: 5s retries: 10 From fd6ef0e31a00cedae9c4e60d39b446f196f3a51c Mon Sep 17 00:00:00 2001 From: Antoine Brun Date: Tue, 30 Sep 2025 15:12:11 +0200 Subject: [PATCH 3/5] revert update ALLOW_PARALLEL_CALLCOMMANDS to true --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index dac5f1e1..9ff7249a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -503,7 +503,7 @@ services: - *db-configuration - *es-configuration - *svn-configuration - ALLOW_PARALLEL_CALLCOMMANDS: true + ALLOW_PARALLEL_CALLCOMMANDS: false healthcheck: interval: 5s retries: 10 From 5c9ec8b1e3d6dda3de70341e2cb685e1d133e8ef Mon Sep 17 00:00:00 2001 From: Antoine Brun Date: Tue, 30 Sep 2025 15:13:58 +0200 Subject: [PATCH 4/5] update to 3.2.2b660 / add Values.msa_sms.allowParallelCallCommands --- front/version.html | 2 +- helm/Chart.yaml | 2 +- helm/templates/msa-sms.yaml | 2 ++ helm/templates/version-configmap.yaml | 2 +- helm/values.yaml | 1 + 5 files changed, 6 insertions(+), 3 deletions(-) diff --git a/front/version.html b/front/version.html index efaf8953..bce3936a 100644 --- a/front/version.html +++ b/front/version.html @@ -1 +1 @@ -{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"635"} +{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"660"} \ No newline at end of file diff --git a/helm/Chart.yaml 
b/helm/Chart.yaml index d2e9cefc..b30284a4 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 3.2.2.658 +appVersion: 3.2.2.660 description: Helm chart for msa and cloudclapp name: msa type: application diff --git a/helm/templates/msa-sms.yaml b/helm/templates/msa-sms.yaml index de1fa2f2..d2ea3431 100644 --- a/helm/templates/msa-sms.yaml +++ b/helm/templates/msa-sms.yaml @@ -48,6 +48,8 @@ spec: secretKeyRef: name: msa-db-secret key: username + - name: ALLOW_PARALLEL_CALLCOMMANDS + value: "{{ .Values.msa_sms.allowParallelCallCommands }}" {{ if .Values.installCCLA }} - name: UBIQUBE_INSTALL_AWS_CLI value: "true" diff --git a/helm/templates/version-configmap.yaml b/helm/templates/version-configmap.yaml index 1ddf9e1b..3a5d5405 100644 --- a/helm/templates/version-configmap.yaml +++ b/helm/templates/version-configmap.yaml @@ -3,4 +3,4 @@ kind: ConfigMap metadata: name: version-configmap data: - index.html: '{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"658"}' + index.html: '{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"660"}' diff --git a/helm/values.yaml b/helm/values.yaml index 315462d5..96e11a9b 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -325,6 +325,7 @@ msa_rsyslog: ephemeral-storage: "100Mi" msa_sms: + allowParallelCallCommands: false image: ubiqube/msa2-sms:sha-6ce9834f41d43c4e53560d4fd9991fb142f77139 replicas: 1 resources: From ddb82e6302eaa3eb7f1acac1d601ce21a02dc175 Mon Sep 17 00:00:00 2001 From: Antoine Brun Date: Thu, 2 Oct 2025 16:07:21 +0200 Subject: [PATCH 5/5] update with build 685 --- docker-compose.ccla-pack.yml | 37 ++++++++++ docker-compose.ccla.yml | 53 ++++++++------- docker-compose.yml | 44 ++---------- front/version.html | 2 +- helm/Chart.yaml | 2 +- helm/images.application.txt | 12 ++-- helm/templates/ccla/ccla-bin-installer.yaml | 34 ++++++++++ helm/templates/ccla/key-vault.yaml | 2 +- helm/templates/ccla/opslab.yaml | 67 +++++++++++++++++++ .../cloudiamo/automation-installer.yaml | 34 ++++++++++ .../cloudiamo/blueprint-installer.yaml | 34 ++++++++++ .../cloudiamo/cisco-ms-installer.yaml | 34 ++++++++++ .../cloudiamo/fortinet-ms-installer.yaml | 34 ++++++++++ .../cloudiamo/linux-ms-installer.yaml | 32 +++++++++ .../cloudiamo/netskope-ms-installer.yaml | 34 ++++++++++ .../cloudiamo/paloalto-ngfw-ms-installer.yaml | 34 ++++++++++ .../paloalto-prisma-ms-installer.yaml | 34 ++++++++++ .../cloudiamo/zscaler-ms-installer.yaml | 34 ++++++++++ .../mswf/ms-inventory-management.yaml | 32 +++++++++ .../mswf/workflow-inventory-management.yaml | 32 +++++++++ helm/templates/mswf/workflow-topology.yaml | 32 +++++++++ helm/templates/version-configmap.yaml | 2 +- helm/values.yaml | 39 ++++++----- 23 files changed, 602 insertions(+), 92 deletions(-) create mode 100644 helm/templates/ccla/ccla-bin-installer.yaml create mode 100644 helm/templates/ccla/opslab.yaml create mode 100644 helm/templates/cloudiamo/automation-installer.yaml create mode 100644 helm/templates/cloudiamo/blueprint-installer.yaml create mode 100644 helm/templates/cloudiamo/cisco-ms-installer.yaml create mode 100644 helm/templates/cloudiamo/fortinet-ms-installer.yaml create mode 100644 helm/templates/cloudiamo/linux-ms-installer.yaml create mode 100644 helm/templates/cloudiamo/netskope-ms-installer.yaml create mode 100644 helm/templates/cloudiamo/paloalto-ngfw-ms-installer.yaml create mode 100644 helm/templates/cloudiamo/paloalto-prisma-ms-installer.yaml create mode 100644 
helm/templates/cloudiamo/zscaler-ms-installer.yaml create mode 100644 helm/templates/mswf/ms-inventory-management.yaml create mode 100644 helm/templates/mswf/workflow-inventory-management.yaml create mode 100644 helm/templates/mswf/workflow-topology.yaml diff --git a/docker-compose.ccla-pack.yml b/docker-compose.ccla-pack.yml index f0407d9f..6a4090f9 100644 --- a/docker-compose.ccla-pack.yml +++ b/docker-compose.ccla-pack.yml @@ -87,3 +87,40 @@ services: volumes: - msa_repository:/opt/fmc_repository - msa_dev:/opt/devops/ + msa-workflow-inventory-management: + depends_on: + - msa-dev + image: ubiqube/msa2-workflow-inventory-management:sha-9ee147ef8864e85e4c31ba21a8fae9aaaae1ca3d + logging: + <<: *logging + volumes: + - msa_repository:/opt/fmc_repository + - msa_dev:/opt/devops/ + msa-ms-inventory-management: + depends_on: + - msa-dev + image: ubiqube/msa2-ms-inventory-management:sha-b28ae96821300678158d3cc5ff2917be559e14de + logging: + <<: *logging + volumes: + - msa_repository:/opt/fmc_repository + - msa_dev:/opt/devops/ + msa-topology-wf: + depends_on: + - msa-dev + image: ubiqube/msa2-wf-topology:sha-a0bd68379dbd582cb727a85b0daf5a090239def1 + logging: + <<: *logging + volumes: + - msa_repository:/opt/fmc_repository + - msa_dev:/opt/devops/ + msa-tutorial-wf: + depends_on: + - msa-dev + image: ubiqube/msa2-wf-tutorial:sha-117f4fafb4beb12dd5a2fc94bc1aa55dd21c6f38 + logging: + <<: *logging + volumes: + - msa_repository:/opt/fmc_repository + - msa_dev:/opt/devops/ + \ No newline at end of file diff --git a/docker-compose.ccla.yml b/docker-compose.ccla.yml index 5a2aa9ad..97850457 100644 --- a/docker-compose.ccla.yml +++ b/docker-compose.ccla.yml @@ -6,27 +6,28 @@ x-logging: &logging max-size: 10m mode: non-blocking services: - ccla-scan-app: - environment: - - UBIQUBE_ZAP_TOKEN=7da091fe-63a4-48c0-9bfa-7614c49feb7c - image: ubiqube/cloudclapp-scan:sha-d1303b2b66dfdafd870b501cdb91b9c5a1dc6465 - ccla-scan-env: - entrypoint: - - zap.sh - - -daemon - - -host - - 0.0.0.0 - - -config - - api.addrs.addr.name=.* - - -config - - api.addrs.addr.regex=true - - -config - - api.key=7da091fe-63a4-48c0-9bfa-7614c49feb7c - - -config - - network.localServers.mainProxy.alpn.enabled=false - - -config - - network.localServers.mainProxy.address=0.0.0.0 - image: zaproxy/zap-stable:2.16.1 + # commented to lighten the setup for now and focus on OPSLAB + # ccla-scan-app: + # environment: + # - UBIQUBE_ZAP_TOKEN=7da091fe-63a4-48c0-9bfa-7614c49feb7c + # image: ubiqube/cloudclapp-scan:sha-d1303b2b66dfdafd870b501cdb91b9c5a1dc6465 + # ccla-scan-env: + # entrypoint: + # - zap.sh + # - -daemon + # - -host + # - 0.0.0.0 + # - -config + # - api.addrs.addr.name=.* + # - -config + # - api.addrs.addr.regex=true + # - -config + # - api.key=7da091fe-63a4-48c0-9bfa-7614c49feb7c + # - -config + # - network.localServers.mainProxy.alpn.enabled=false + # - -config + # - network.localServers.mainProxy.address=0.0.0.0 + # image: zaproxy/zap-stable:2.16.1 cloudclapp: depends_on: - msa-api @@ -38,11 +39,11 @@ services: test: - CMD-SHELL - curl --fail http://localhost:8080 - image: ubiqube/cloudclapp:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 + image: ubiqube/cloudclapp:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 cloudclapp-bin: depends_on: - msa-dev - image: ubiqube/msa2-ccla-bin-installer:sha-926a02fb507f520be7670d51eb6036093afbb6bf + image: ubiqube/msa2-ccla-bin-installer:sha-cd02a4308646fb9e5123d53165149580c72ff881 logging: <<: *logging volumes: @@ -51,7 +52,7 @@ services: cloudclapp-wf: depends_on: - msa-dev - image: 
ubiqube/msa2-ccla-wf-installer:sha-e06e1cb61a4e53930f602e9af9e4a3131ccec01b + image: ubiqube/msa2-ccla-wf-installer:sha-0d045c4348aeee668372c18ca3eeafd9b9adc352 logging: <<: *logging volumes: @@ -95,7 +96,7 @@ services: - ./helm/files/nginx_ccla.conf:/etc/nginx/custom_conf.d/https/nginx_ccla.conf msa-sms: environment: - - UBIQUBE_INSTALL_AWS_CLI=true + - UBIQUBE_INSTALL_AWS_CLI=false opslab: depends_on: - msa-api @@ -103,4 +104,4 @@ services: test: - CMD-SHELL - curl --fail http://localhost:8080 - image: ubiqube/sase-opslab:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 + image: ubiqube/sase-opslab:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 diff --git a/docker-compose.yml b/docker-compose.yml index 9ff7249a..92f35d9c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -89,7 +89,7 @@ services: - CMD-SHELL - curl -s --fail http://localhost:8480/actuator/health |jq -r '.status' |grep '^UP$$' timeout: 5s - image: ubiqube/msa2-api:sha-932b5e70acb87524e4fa1670e75fed08b6ddbb59 + image: ubiqube/msa2-api:sha-ad7709105c0144df73d73a61d28141db30fc5c80 logging: <<: *logging volumes: @@ -113,7 +113,7 @@ services: start_period: 1m test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' timeout: 10s - image: ubiqube/msa2-auth:sha-aa48205a5ece128029a1162f74640f23f70ad6d8 + image: ubiqube/msa2-auth:sha-d45f43ae42bf82e062d8321d747db031a110ae56 msa-broker: environment: ARTEMIS_PASSWORD: simetraehcapa @@ -402,15 +402,6 @@ services: - msa_sms_logs:/opt/sms/logs - msa_monitbulkfiles:/opt/sms/spool/parser - msa_monitbulkfiles_err:/opt/sms/spool/parser-error - msa-ms-inventory-management: - depends_on: - - msa-dev - image: ubiqube/msa2-ms-inventory-management:sha-b28ae96821300678158d3cc5ff2917be559e14de - logging: - <<: *logging - volumes: - - msa_repository:/opt/fmc_repository - - msa_dev:/opt/devops/ msa-parse: depends_on: msa-broker: @@ -454,7 +445,7 @@ services: msa-python-sdk: depends_on: - msa-dev - image: ubiqube/msa2-python-sdk-installer:sha-cf1b245bbd79a7e0f990a772358eeba0d51bfdb1 + image: ubiqube/msa2-python-sdk-installer:sha-2d1ce3340be662ea4fd363515859c9d2688163e7 logging: <<: *logging volumes: @@ -585,24 +576,6 @@ services: image: ubiqube/msa2-svn:sha-2db53c87bde3bbf4f2fc43c1a84898f415fedd07 volumes: - msa_svn:/var/svn - msa-topology-wf: - depends_on: - - msa-dev - image: ubiqube/msa2-wf-topology:sha-123d7c227159dbf3550cd1e2a860728b29d91800 - logging: - <<: *logging - volumes: - - msa_repository:/opt/fmc_repository - - msa_dev:/opt/devops/ - msa-tutorial-wf: - depends_on: - - msa-dev - image: ubiqube/msa2-wf-tutorial:sha-117f4fafb4beb12dd5a2fc94bc1aa55dd21c6f38 - logging: - <<: *logging - volumes: - - msa_repository:/opt/fmc_repository - - msa_dev:/opt/devops/ msa-ui: depends_on: msa-api: @@ -632,18 +605,9 @@ services: test: - CMD-SHELL - curl --fail http://localhost:8080 - image: ubiqube/msa2-ui:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 + image: ubiqube/msa2-ui:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 logging: <<: *logging - msa-workflow-inventory-management: - depends_on: - - msa-dev - image: ubiqube/msa2-workflow-inventory-management:sha-9ee147ef8864e85e4c31ba21a8fae9aaaae1ca3d - logging: - <<: *logging - volumes: - - msa_repository:/opt/fmc_repository - - msa_dev:/opt/devops/ msa-zipkin: image: docker.io/openzipkin/zipkin ports: diff --git a/front/version.html b/front/version.html index bce3936a..4e4d72af 100644 --- a/front/version.html +++ b/front/version.html @@ -1 +1 @@ -{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"660"} \ No newline at end of file 
+{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"685"} \ No newline at end of file diff --git a/helm/Chart.yaml b/helm/Chart.yaml index b30284a4..0cbdaac2 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 3.2.2.660 +appVersion: 3.2.2.685 description: Helm chart for msa and cloudclapp name: msa type: application diff --git a/helm/images.application.txt b/helm/images.application.txt index 2651ad34..1095bfac 100644 --- a/helm/images.application.txt +++ b/helm/images.application.txt @@ -8,7 +8,7 @@ busybox:1.37 docker.io/openzipkin/zipkin -docker.io/ubiqube/msa2-ccla-wf-installer:sha-e06e1cb61a4e53930f602e9af9e4a3131ccec01b +docker.io/ubiqube/msa2-ccla-wf-installer:sha-0d045c4348aeee668372c18ca3eeafd9b9adc352 docker.io/ubiqube/msa2-mariadb:sha-f2fb3a804060df12cbece26b2485b1f7934c63c7 @@ -20,15 +20,15 @@ opensearchproject/opensearch-dashboards:1.3.20 ubiqube/cloudclapp-scan:sha-d1303b2b66dfdafd870b501cdb91b9c5a1dc6465 -ubiqube/cloudclapp:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 +ubiqube/cloudclapp:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 -ubiqube/sase-opslab:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 +ubiqube/sase-opslab:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 ubiqube/msa2-alarm:sha-948779a57daf0378d645693404181162a5a2fb6b -ubiqube/msa2-api:sha-932b5e70acb87524e4fa1670e75fed08b6ddbb59 +ubiqube/msa2-api:sha-ad7709105c0144df73d73a61d28141db30fc5c80 -ubiqube/msa2-auth:sha-aa48205a5ece128029a1162f74640f23f70ad6d8 +ubiqube/msa2-auth:sha-d45f43ae42bf82e062d8321d747db031a110ae56 ubiqube/msa2-broker:sha-7090331f69a90a4d339072bd52b6b4b43744459c @@ -68,7 +68,7 @@ ubiqube/msa2-snmptrap:sha-2b8dc239672da2e96165bd4e751b7c84330343ca ubiqube/msa2-svn:sha-2db53c87bde3bbf4f2fc43c1a84898f415fedd07 -ubiqube/msa2-ui:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 +ubiqube/msa2-ui:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 zaproxy/zap-stable:2.16.1 diff --git a/helm/templates/ccla/ccla-bin-installer.yaml b/helm/templates/ccla/ccla-bin-installer.yaml new file mode 100644 index 00000000..aedd7c2c --- /dev/null +++ b/helm/templates/ccla/ccla-bin-installer.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.installCCLA .Values.ccla_bin.enabled -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: ccla-bin-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: ccla-bin-installer-job + image: "{{ .Values.ccla_bin.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/ccla/key-vault.yaml b/helm/templates/ccla/key-vault.yaml index a67a9916..d4263c7c 100644 --- a/helm/templates/ccla/key-vault.yaml +++ b/helm/templates/ccla/key-vault.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.installCCLA .Values.key_vault.enabled -}} +{{- if and (or .Values.installCCLA .Values.installOpslab) .Values.key_vault.enabled -}} apiVersion: apps/v1 kind: Deployment metadata: diff --git a/helm/templates/ccla/opslab.yaml b/helm/templates/ccla/opslab.yaml new file mode 100644 index 00000000..edddfbf2 --- /dev/null +++ b/helm/templates/ccla/opslab.yaml @@ -0,0 +1,67 @@ +{{- if .Values.installOpslab -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: opslab +spec: + replicas: 1 + selector: + matchLabels: + app: opslab + template: + metadata: + labels: + app: opslab + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: opslab + image: {{ .Values.opslab.image }} + ports: + - containerPort: 8080 + resources: + {{- toYaml .Values.ccla.resources | nindent 12 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: opslab +spec: + selector: + app: opslab + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 +{{- if .Values.global.ingress.enable }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: opslab-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: +{{- if .Values.global.tls }} + tls: +{{- toYaml .Values.global.tls | nindent 4 }} +{{- end }} + rules: + - http: + paths: + - path: /opslab(/|$)(.*) + pathType: Prefix + backend: + service: + name: opslab + port: + number: 8080 +{{- if .Values.global.tls }} + host: {{ index (index .Values.global.tls 0).hosts 0 | quote }} +{{- end }} +{{- end -}} +{{- end -}} + diff --git a/helm/templates/cloudiamo/automation-installer.yaml b/helm/templates/cloudiamo/automation-installer.yaml new file mode 100644 index 00000000..be93b6cc --- /dev/null +++ b/helm/templates/cloudiamo/automation-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-automation-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-automation-installer-job + image: "{{ .Values.cld_automation_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/cloudiamo/blueprint-installer.yaml b/helm/templates/cloudiamo/blueprint-installer.yaml new file mode 100644 index 00000000..bd251b27 --- /dev/null +++ b/helm/templates/cloudiamo/blueprint-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-blueprint-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-blueprint-installer-job + image: "{{ .Values.cld_blueprint_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/cloudiamo/cisco-ms-installer.yaml b/helm/templates/cloudiamo/cisco-ms-installer.yaml new file mode 100644 index 00000000..bce2a830 --- /dev/null +++ b/helm/templates/cloudiamo/cisco-ms-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-cisco-ms-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-cisco-ms-installer-job + image: "{{ .Values.cld_cisco_ms_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/cloudiamo/fortinet-ms-installer.yaml b/helm/templates/cloudiamo/fortinet-ms-installer.yaml new file mode 100644 index 00000000..ed4795d3 --- /dev/null +++ b/helm/templates/cloudiamo/fortinet-ms-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-fortinet-ms-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-fortinet-ms-installer-job + image: "{{ .Values.cld_fortinet_ms_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/cloudiamo/linux-ms-installer.yaml b/helm/templates/cloudiamo/linux-ms-installer.yaml new file mode 100644 index 00000000..7e07469e --- /dev/null +++ b/helm/templates/cloudiamo/linux-ms-installer.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-linux-ms-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-linux-ms-installer-job + image: "{{ .Values.cld_linux_ms_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 \ No newline at end of file diff --git a/helm/templates/cloudiamo/netskope-ms-installer.yaml b/helm/templates/cloudiamo/netskope-ms-installer.yaml new file mode 100644 index 00000000..f84cd456 --- /dev/null +++ b/helm/templates/cloudiamo/netskope-ms-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-netskope-ms-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-netskope-ms-installer-job + image: "{{ .Values.cld_netskope_ms_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/cloudiamo/paloalto-ngfw-ms-installer.yaml b/helm/templates/cloudiamo/paloalto-ngfw-ms-installer.yaml new file mode 100644 index 00000000..840ba5d2 --- /dev/null +++ b/helm/templates/cloudiamo/paloalto-ngfw-ms-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-paloalto-ngfw-ms-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-paloalto-ngfw-ms-installer-job + image: "{{ .Values.cld_paloalto_ngfw_ms_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/cloudiamo/paloalto-prisma-ms-installer.yaml b/helm/templates/cloudiamo/paloalto-prisma-ms-installer.yaml new file mode 100644 index 00000000..3fd50237 --- /dev/null +++ b/helm/templates/cloudiamo/paloalto-prisma-ms-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-paloalto-prisma-ms-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-paloalto-prisma-ms-installer-job + image: "{{ .Values.cld_paloalto_prisma_ms_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/cloudiamo/zscaler-ms-installer.yaml b/helm/templates/cloudiamo/zscaler-ms-installer.yaml new file mode 100644 index 00000000..5c19923b --- /dev/null +++ b/helm/templates/cloudiamo/zscaler-ms-installer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.installCCLA -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: cld-zscaler-ms-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: cld-zscaler-ms-installer-job + image: "{{ .Values.cld_zscaler_ms_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 +{{- end -}} \ No newline at end of file diff --git a/helm/templates/mswf/ms-inventory-management.yaml b/helm/templates/mswf/ms-inventory-management.yaml new file mode 100644 index 00000000..aad26680 --- /dev/null +++ b/helm/templates/mswf/ms-inventory-management.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: msa-ms-inventory-management-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: msa-ms-inventory-management-installer-job + image: "{{ .Values.msa_ms_inventory_management_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 \ No newline at end of file diff --git a/helm/templates/mswf/workflow-inventory-management.yaml b/helm/templates/mswf/workflow-inventory-management.yaml new file mode 100644 index 00000000..53985c06 --- /dev/null +++ b/helm/templates/mswf/workflow-inventory-management.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: msa-workflow-inventory-management-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: msa-workflow-inventory-management-installer-job + image: "{{ .Values.msa_workflow_inventory_management_installer.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 \ No newline at end of file diff --git a/helm/templates/mswf/workflow-topology.yaml b/helm/templates/mswf/workflow-topology.yaml new file mode 100644 index 00000000..a6799053 --- /dev/null +++ b/helm/templates/mswf/workflow-topology.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: HookSucceeded + name: msa-workflow-topology-installer-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - env: + - name: ENABLE_K8S + value: "true" + name: msa-workflow-topology-installer-job + image: "{{ .Values.msa_topology_wf.image }}" + imagePullPolicy: {{ .Values.imagePullPolicy }} + volumeMounts: + - mountPath: /opt/fmc_repository + name: msa-repository + restartPolicy: Never + volumes: + - name: msa-repository + persistentVolumeClaim: + claimName: msa-repository + backoffLimit: 4 \ No newline at end of file diff --git a/helm/templates/version-configmap.yaml b/helm/templates/version-configmap.yaml index 3a5d5405..7492b4c8 100644 --- a/helm/templates/version-configmap.yaml +++ b/helm/templates/version-configmap.yaml @@ -3,4 +3,4 @@ kind: ConfigMap metadata: name: version-configmap data: - index.html: '{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"660"}' + index.html: '{"jira_version":"3.2.2","msa_version":"3.2.2","ccla_version":"1.2.2","build":"685"}' diff --git a/helm/values.yaml b/helm/values.yaml index 96e11a9b..89b2d429 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -103,7 +103,7 @@ msa_alarm: ephemeral-storage: "100Mi" msa_api: - image: ubiqube/msa2-api:sha-932b5e70acb87524e4fa1670e75fed08b6ddbb59 + image: ubiqube/msa2-api:sha-ad7709105c0144df73d73a61d28141db30fc5c80 replicas: 1 resources: limits: @@ -115,7 +115,7 @@ msa_api: ephemeral-storage: "100Mi" msa_auth: - image: ubiqube/msa2-auth:sha-aa48205a5ece128029a1162f74640f23f70ad6d8 + image: ubiqube/msa2-auth:sha-d45f43ae42bf82e062d8321d747db031a110ae56 replicas: 1 admin_user: username: admin @@ -365,7 +365,7 @@ msa_svn: password: UB1s5n msa_topology_wf: - image: docker.io/ubiqube/msa2-wf-topology:sha-123d7c227159dbf3550cd1e2a860728b29d91800 + image: docker.io/ubiqube/msa2-wf-topology:sha-a0bd68379dbd582cb727a85b0daf5a090239def1 msa-tutorial-wf: image: docker.io/ubiqube/msa2-wf-tutorial:sha-117f4fafb4beb12dd5a2fc94bc1aa55dd21c6f38 @@ -402,7 +402,7 @@ msa_ui: value: "true" - name: FEATURE_WORKFLOW_OWNER value: "false" - image: ubiqube/msa2-ui:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 + image: ubiqube/msa2-ui:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 replicas: 1 resources: requests: @@ -444,36 +444,43 @@ openshift: ### Cloudclapp ### installCCLA: false -ccla_blueprint: - image: docker.io/ubiqube/cld-blueprint-installer:sha-1f4dac22a1467dab2daf4c10acdefb820bb315cc - -ccla_automation: +installOpslab: true +cld_automation_installer: image: docker.io/ubiqube/cld-automation-installer:sha-23bcb2379abdb07ea85c0923788dcfcd8f4bae32 -cld_cisco_ms: +cld_blueprint_installer: + image: docker.io/ubiqube/cld-blueprint-installer:sha-1f4dac22a1467dab2daf4c10acdefb820bb315cc + +cld_cisco_ms_installer: image: docker.io/ubiqube/cld-cisco-ms-installer:sha-4251bc22e2900ef0606f9beb458a073c0b903857 cld_fortinet_ms_installer: image: docker.io/ubiqube/cld-fortinet-ms-installer:sha-d62f1dc7524baebee1acc5123e73084d2e382a26 -cld_netskope_ms: +cld_netskope_ms_installer: image: docker.io/ubiqube/cld-netskope-ms-installer:sha-1f77ef4f95d0d6374729f4a6cf4a8daa024668a7 cld_paloalto_ngfw_ms_installer: image: docker.io/ubiqube/cld-paloalto-ngfw-installer:sha-35368719b7558753f37ea5df16595cdc3e29acc8 -cld-paloalto-prisma-ms: - image: ubiqube/cld-paloalto-prisma-ms-installer:sha-29e9ec7352d027301226eb137c2179dea60548a8 +cld_paloalto_prisma_ms_installer: + image: docker.io/ubiqube/cld-paloalto-prisma-ms-installer:sha-29e9ec7352d027301226eb137c2179dea60548a8 + +cld_zscaler_ms_installer: + image: docker.io/ubiqube/cld-zscaler-ms-installer:sha-93f728c321ed79d10be24fa00ce8bf816d186a45 + 
+cld_linux_ms_installer: + image: docker.io/ubiqube/cld-linux-ms-installer:sha-ce1222345c5ff881ed7c3c060185e674d2594df0 ccla_wf: - image: docker.io/ubiqube/msa2-ccla-wf-installer:sha-e06e1cb61a4e53930f602e9af9e4a3131ccec01b + image: docker.io/ubiqube/msa2-ccla-wf-installer:sha-0d045c4348aeee668372c18ca3eeafd9b9adc352 ccla_bin: enabled: true - image: docker.io/ubiqube/msa2-ccla-bin-installer:sha-926a02fb507f520be7670d51eb6036093afbb6bf + image: docker.io/ubiqube/msa2-ccla-bin-installer:sha-cd02a4308646fb9e5123d53165149580c72ff881 ccla: - image: ubiqube/cloudclapp:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 + image: ubiqube/cloudclapp:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 replicas: 1 resources: requests: @@ -482,7 +489,7 @@ ccla: ephemeral-storage: "50Mi" opslab: - image: ubiqube/sase-opslab:sha-698aaf6115f96cba55f6ed23e75c444d7017e5e1 + image: ubiqube/sase-opslab:sha-997f8e0dc9c88819dd423d51e1a8a91d43e5cee8 replicas: 1 resources: requests: