From 3bcd3a1d619549a5e610c80f0432b688f47d23d5 Mon Sep 17 00:00:00 2001 From: MatheuslFavaretto Date: Tue, 3 Sep 2024 20:17:45 -0300 Subject: [PATCH 1/6] add zabbix --- Makefile | 7 + charts/zabbix/values.yaml | 977 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 984 insertions(+) create mode 100644 charts/zabbix/values.yaml diff --git a/Makefile b/Makefile index e552fa7..5fd10c1 100644 --- a/Makefile +++ b/Makefile @@ -50,3 +50,10 @@ create-prometheus-adapter: delete-prometheus-adapter: helm uninstall prometheus-adapter -n custom-metrics --delete-namespace + +create-zabbix: + helm install my-zabbix-test zabbix-community/zabbix --version 5.0.1 -n zabbix --create-namespace -f charts/zabbix/values.yaml + +delete-zabbix: + helm uninstall my-zabbix-test -n zabbix && kubectl delete namespace zabbix + diff --git a/charts/zabbix/values.yaml b/charts/zabbix/values.yaml new file mode 100644 index 0000000..24ac0c2 --- /dev/null +++ b/charts/zabbix/values.yaml @@ -0,0 +1,977 @@ +# Default values for zabbix. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Global configurations +global: + # -- Reference to one or more secrets to be used when pulling images. + # + # For example: + # imagePullSecrets: + # - name: "image-pull-secret" + imagePullSecrets: [] + + # -- Labels to apply to all resources. + commonLabels: {} + +# -- Zabbix components (server, agent, web frontend, ...) image tag to use. +#This helm chart is compatible with non-LTS version of Zabbix, that include important changes and functionalities. +#But by default this helm chart will install the latest LTS version (example: 7.0.x). +#See more info in [Zabbix Life Cycle & Release Policy](https://www.zabbix.com/life_cycle_and_release_policy) page +#When you want use a non-LTS version (example: 6.4.x), you have to set this yourself. You can change version +#here or overwrite in each component (example: zabbixserver.image.tag, etc). 
+zabbixImageTag: ubuntu-7.0.0 + +# **Zabbix Postgresql access / credentials** configurations +# with this dict, you can set unified PostgreSQL access credentials, IP and so on for both Zabbix Server and Zabbix web frontend +# you can either chose from having all this in one named (preexisting) secret or setting the values one by one with vars +# whatever set here overrides the credential settings within the "zabbixServer" and "zabbixWeb" sections. +postgresAccess: + # under this section you can configure PostgreSQL access and credentials centrally for the entire helm chart. + # All relevant components installed by this chart will respect it: zabbixServer, zabbixWeb and postgresql (if enabled) + # + # USING ONE SECRET CONTAINING ALL DB RELEVANT SETTINGS + # PostgreSQL access details all in one existing secret (matches the structure of secrets the CrunchyData pgo operator generates) + # if this option is chosen the below listed settings are being ignored + # the secret must contain the following keys: + # * host + # * port + # * user + # * password + # * database + # -- Whether to use the unified PostgreSQL access secret + useUnifiedSecret: true + # -- Name of one secret for unified configuration of PostgreSQL access + unifiedSecretName: zabbixdb-pguser-zabbix + # -- automatically create secret if not already present (works only in combination with postgresql.enabled=true) + unifiedSecretAutoCreate: true + # + # If you do NOT want to use one unified secret for all settings, you can still set the credentials manually here. + # These settings will be used for all components of this chart where it makes sense (zabbix server, postgresql, + # web frontend, ...) 
+ # -- Address of database host - ignored if postgresql.enabled=true + host: "zabbix-postgresql" + # -- Port of database host - ignored if postgresql.enabled=true + port: "5432" + # -- User of database + user: "zabbix" + # -- Name of a secret used for Postgres Password, if set, it overrules the POSTGRES_PASSWORD value + #passwordSecret: mysecret + # -- Key of the secret used for Postgres Password, requires POSTGRES_PASSWORD_SECRET, defaults to password + #passwordSecretKey: "password" + # -- Password of database - ignored if passwordSecret is set + password: "zabbix" + # -- Name of database + database: "zabbix" + +# **Zabbix Server** configurations +zabbixServer: + # -- Enables use of **Zabbix Server** + enabled: true + # -- Number of replicas of ``zabbixServer`` module + replicaCount: 1 + # -- Set permissive podAntiAffinity to spread replicas over cluster nodes if replicaCount>1 + podAntiAffinity: true + # -- Optional set true open a port direct on node where Zabbix Server runs + hostPort: false + # -- Optional set hostIP different from 0.0.0.0 to open port only on this IP + hostIP: 0.0.0.0 + # -- Requests and limits of pod resources. 
See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + image: + # -- Zabbix Server Docker image name + repository: zabbix/zabbix-server-pgsql + # -- Zabbix Server Docker image tag, if you want to override zabbixImageTag + tag: null + # -- Pull policy of Docker image + pullPolicy: IfNotPresent + # -- List of dockerconfig secrets names to use when pulling images + pullSecrets: [] + # -- Automatically clean orphaned ha nodes from ha_nodes db table + haNodesAutoClean: + enabled: true + image: + # -- Postgresql Docker image name: chose one of "postgres" or "timescale/timescaledb" + repository: postgres + # -- Tag of Docker image of Postgresql server, choice "16" for postgres "2.14.2-pg16" for timescaledb + # (Zabbix supports TimescaleDB 2.1.0-2.14.x. More info: https://www.zabbix.com/documentation/7.0/en/manual/installation/requirements) + tag: 16 + pullPolicy: IfNotPresent + pullSecrets: [] + schedule: "0 1 * * *" + concurrencyPolicy: "Replace" + deleteOlderThanSeconds: 3600 + # -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + # -- Extra environment variables. A list of additional environment variables. + extraEnv: [] + # -- Additional volumeMounts to the cronjob hanodes autoclean + extraVolumeMounts: [] + # -- Additional containers to start within the cronjob hanodes autoclean + extraContainers: [] + # -- Additional init containers to start within the cronjob hanodes autoclean + extraInitContainers: [] + # -- Additional volumes to make available to the cronjob hanodes autoclean + extraVolumes: [] + # -- Additional specifications to the cronjob hanodes autoclean + extraPodSpecs: {} + # -- Security Context configurations. 
Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + # -- Labels to add to the cronjob for ha nodes autoclean + cronjobLabels: {} + jobDBSchema: + # -- Annotations to add to the jobs + jobAnnotations: {} + # -- Labels to add to the jobs + jobLabels: {} + # -- Additional init containers to start within the Zabbix Server Job DB Schema pod + extraInitContainers: [] + # -- Additional containers to start within the Zabbix Server Job DB Schema pod + extraContainers: [] + # -- Additional specifications to the Zabbix Server Job DB Schema pod + extraPodSpecs: {} + # -- Additional volumeMounts to the Zabbix Server Job DB Schema pod + extraVolumeMounts: [] + # -- Additional volumes to make available to the Zabbix Server Job DB Schema pod + extraVolumes: [] + # -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + service: + # -- Type of service to expose the application. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + #More details: https://kubernetes.io/docs/concepts/services-networking/service/ + type: ClusterIP + # -- clusterIP is the IP address of the service and is usually assigned randomly. + #If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service. + clusterIP: + # -- externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's "externally-facing" addresses + #(NodePorts, ExternalIPs, and LoadBalancer IPs). "Local" to preserve sender's IP address. Please note that this might not work on multi-node clusters, + #depending on your network settings. + #externalTrafficPolicy: Local + # -- externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. + #These IPs are not managed by Kubernetes. 
+ externalIPs: [] + # -- Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying + #the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerIP: "" + # -- If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer + #will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerSourceRanges: [] + # -- loadBalancerClass is the class of the load balancer implementation this Service belongs to. + #If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or + #"example.com/internal-vip". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. + #If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, + #but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services + #with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. + #This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. + #This field will be wiped when a service is updated to a non 'LoadBalancer' type. + loadBalancerClass: "" + # -- Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. + #Must be ClientIP or None. Defaults to None. 
More info: + #https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: None + # -- Port of service in Kubernetes cluster + port: 10051 + # -- NodePort port to allocate on each node (only if service.type = NodePort or Loadbalancer) + nodePort: 31051 + # -- Annotations for the zabbix-server service + annotations: {} + # metallb.universe.tf/address-pool: production-public-ips + # -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/server-pgsql/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml + extraEnv: [] + #- name: ENABLE_TIMESCALEDB + # value: "true" + # -- Annotations to add to the deployment + deploymentAnnotations: {} + # -- Labels to add to the deployment + deploymentLabels: {} + # -- Annotations to add to the containers + containerAnnotations: {} + # -- Labels to add to the containers + containerLabels: {} + # -- Additional volumeMounts to the Zabbix Server container + extraVolumeMounts: [] + # -- Additional containers to start within the Zabbix Server pod + extraContainers: [] + # -- Additional init containers to start within the Zabbix Server pod + extraInitContainers: [] + # -- Additional volumes to make available to the Zabbix Server pod + extraVolumes: [] + # -- Additional specifications to the Zabbix Server pod + extraPodSpecs: {} + # -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + # -- The kubelet uses liveness probes to know when to restart a container. 
Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + livenessProbe: {} + # -- The kubelet uses readiness probes to know when a container is ready to start accepting traffic. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + readinessProbe: {} + # -- The kubelet uses startup probes to know when a container application has started. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + startupProbe: {} + +# **PostgreSQL** configurations +postgresql: + # -- Create a database using Postgresql + enabled: true + image: + # -- Postgresql Docker image name: chose one of "postgres" or "timescale/timescaledb" + repository: postgres + # -- Tag of Docker image of Postgresql server, choice "16" for postgres "2.14.2-pg16" for timescaledb + # (Zabbix supports TimescaleDB 2.1.0-2.14.x. More info: https://www.zabbix.com/documentation/7.0/en/manual/installation/requirements) + tag: 16 + # -- Pull policy of Docker image + pullPolicy: IfNotPresent + # -- List of dockerconfig secrets names to use when pulling images + pullSecrets: [] + # -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + persistence: + # -- Whether to enable persistent storage for the postgres container or not + enabled: false + # -- Existing persistent volume claim name to be used to store posgres data + existingClaimName: false + # -- Size of the PVC to be automatically generated + storageSize: 5Gi + # -- Kubernetes uses volume access modes to match PersistentVolumeClaims and PersistentVolumes. 
See: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes + #claim_access_mode: "ReadWriteOnce" + # -- Storage PVC storageclass to use + #storageClass: my-storage-class + service: + # -- Type of service to expose the application. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + # More details: https://kubernetes.io/docs/concepts/services-networking/service/ + type: ClusterIP + # -- clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, + #is in-range (as per system configuration), and is not in use, it will be allocated to the service. + clusterIP: + # -- Port of service in Kubernetes cluster + port: 5432 + # -- Annotations for the zabbix-server service + annotations: {} + # metallb.universe.tf/address-pool: production-public-ips + # -- Extra Postgresql runtime parameters ("-c" options) + extraRuntimeParameters: + max_connections: 50 + # -- Extra environment variables. A list of additional environment variables. + extraEnv: [] + # -- Annotations to add to the statefulset + statefulSetAnnotations: {} + # -- Labels to add to the statefulset + statefulSetLabels: {} + # -- Annotations to add to the containers + containerAnnotations: {} + # -- Labels to add to the containers + containerLabels: {} + # -- Additional volumeMounts to the postgresql container + extraVolumeMounts: [] + # -- Additional containers to start within the postgresql pod + extraContainers: [] + # -- Additional init containers to start within the postgresql pod + extraInitContainers: [] + # -- Additional volumes to make available to the postgresql pod + extraVolumes: [] + # -- Additional specifications to the postgresql pod + extraPodSpecs: {} + # -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + # -- The kubelet uses liveness probes to know when to restart a container. 
Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + livenessProbe: {} + # -- The kubelet uses readiness probes to know when a container is ready to start accepting traffic. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + readinessProbe: {} + # -- The kubelet uses startup probes to know when a container application has started. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + startupProbe: {} + +# **Zabbix Proxy** configurations +zabbixProxy: + # -- Enables use of **Zabbix Proxy** + enabled: false + # -- Number of replicas of ``zabbixProxy`` module + replicaCount: 1 + # -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + image: + # -- Zabbix Proxy Docker image name + repository: zabbix/zabbix-proxy-sqlite3 + # -- Zabbix Proxy Docker image tag, if you want to override zabbixImageTag + tag: null + # -- Pull policy of Docker image + pullPolicy: IfNotPresent + # -- List of dockerconfig secrets names to use when pulling images + pullSecrets: [] + # -- The variable allows to switch Zabbix Proxy mode. By default, value is 0 - active proxy. Allowed values are 0 and 1. + ZBX_PROXYMODE: 0 + # -- Zabbix Proxy hostname + # Case sensitive hostname + ZBX_HOSTNAME: zabbix-proxy # This variable is unique, case sensitive hostname. + # -- Zabbix Server host + ZBX_SERVER_HOST: zabbix-zabbix-server + # -- Zabbix Server port + ZBX_SERVER_PORT: 10051 + # ZBX_LOADMODULE: dummy1.so,dummy2.so # The variable is list of comma separated loadable Zabbix modules. It works with volume /var/lib/zabbix/modules. 
+ # The variable is used to specify debug level, from 0 to 5 + ZBX_DEBUGLEVEL: 4 + # The variable is used to specify timeout for processing checks. By default, value is 4. + ZBX_TIMEOUT: 4 + # -- The variable enable communication with Zabbix Java Gateway to collect Java related checks. By default, value is false. + ZBX_JAVAGATEWAY_ENABLE: false + # -- Cache size + ZBX_VMWARECACHESIZE: 128M + service: + # -- Type of service to expose the application. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + #More details: https://kubernetes.io/docs/concepts/services-networking/service/ + type: ClusterIP + # -- clusterIP is the IP address of the service and is usually assigned randomly. + #If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service. + clusterIP: + # -- externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's "externally-facing" addresses + #(NodePorts, ExternalIPs, and LoadBalancer IPs). "Local" to preserve sender's IP address. Please note that this might not work on multi-node clusters, + #depending on your network settings. + #externalTrafficPolicy: Local + # -- externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. + #These IPs are not managed by Kubernetes. + externalIPs: [] + # -- Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying + #the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerIP: "" + # -- If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer + #will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. 
+ loadBalancerSourceRanges: [] + # -- loadBalancerClass is the class of the load balancer implementation this Service belongs to. + #If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or + #"example.com/internal-vip". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. + #If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, + #but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services + #with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. + #This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. + #This field will be wiped when a service is updated to a non 'LoadBalancer' type. + loadBalancerClass: "" + # -- Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. + #Must be ClientIP or None. Defaults to None. More info: + #https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: None + # -- Port of service in Kubernetes cluster + port: 10051 + # -- NodePort port to allocate on each node (only if service.type = NodePort or Loadbalancer) + nodePort: 31053 + # -- Annotations for the zabbix-proxy service + annotations: {} + # metallb.universe.tf/address-pool: production-public-ips + # -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/proxy-sqlite3/alpine#environment-variables. 
See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml + extraEnv: [] + # -- Annotations to add to the statefulset + statefulSetAnnotations: {} + # -- Labels to add to the statefulset + statefulSetLabels: {} + # -- Annotations to add to the containers + containerAnnotations: {} + # -- Labels to add to the containers + containerLabels: {} + # -- Additional volumeMounts to the Zabbix Proxy container + extraVolumeMounts: [] + # -- Additional containers to start within the Zabbix Proxy pod + extraContainers: [] + # -- Additional init containers to start within the Zabbix Proxy pod + extraInitContainers: [] + # -- Additional volumes to make available to the Zabbix Proxy pod + extraVolumes: [] + # -- Additional specifications to the Zabbix Proxy pod + extraPodSpecs: {} + # -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + # -- Extra volumeClaimTemplate for zabbixProxy statefulset + extraVolumeClaimTemplate: [] + # -- The kubelet uses liveness probes to know when to restart a container. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + livenessProbe: {} + # -- The kubelet uses readiness probes to know when a container is ready to start accepting traffic. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + readinessProbe: {} + # -- The kubelet uses startup probes to know when a container application has started. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + startupProbe: {} + +# **Zabbix Agent** configurations +zabbixAgent: + # -- Enables use of **Zabbix Agent** + enabled: true + # -- Its is a default mode. Zabbix-agent will run as sidecar in zabbix-server and zabbix-proxy pods. 
Disable this mode if you want to run zabbix-agent as daemonSet + runAsSidecar: true + # -- Enable this mode if you want to run zabbix-agent as daemonSet. The 'zabbixAgent.runAsSidecar' option must be false. + runAsDaemonSet: false + # -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + image: + # -- Zabbix Agent Docker image name. Can use zabbix/zabbix-agent or zabbix/zabbix-agent2 + repository: zabbix/zabbix-agent2 + # -- Zabbix Agent Docker image tag, if you want to override zabbixImageTag + tag: null + # -- Pull policy of Docker image + pullPolicy: IfNotPresent + # -- List of dockerconfig secrets names to use when pulling images + pullSecrets: [] + # -- Zabbix Server host + ZBX_SERVER_HOST: 0.0.0.0/0 + # -- Zabbix Server port + ZBX_SERVER_PORT: 10051 + # -- This variable is boolean (true or false) and enables or disables feature of passive checks. By default, value is true + ZBX_PASSIVE_ALLOW: true + # -- The variable is comma separated list of allowed Zabbix Server or proxy hosts for connections to Zabbix Agent container. Example: Server=127.0.0.1,192.168.1.0/24,::1,2001:db8::/32,zabbix.example.com + #ZBX_PASSIVESERVERS: '' + # -- This variable is boolean (true or false) and enables or disables feature of active checks + ZBX_ACTIVE_ALLOW: false + # -- The variable is comma separated list of allowed Zabbix Server or proxy hosts for connections to Zabbix Agent container. You may specify port. + #ZBX_ACTIVESERVERS: '' + # -- The variable is list of comma separated loadable Zabbix modules. It works with volume /var/lib/zabbix/modules. + #ZBX_LOADMODULE: dummy1.so,dummy2.so + # -- The variable is used to specify debug level, from 0 to 5 + ZBX_DEBUGLEVEL: 3 + # -- The variable is used to specify timeout for processing checks. By default, value is 4. 
+ ZBX_TIMEOUT: 4 + service: + # -- Type of service to expose the application. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + #More details: https://kubernetes.io/docs/concepts/services-networking/service/ + type: ClusterIP + # -- clusterIP is the IP address of the service and is usually assigned randomly. + #If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service. + clusterIP: + # -- externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's "externally-facing" addresses + #(NodePorts, ExternalIPs, and LoadBalancer IPs). "Local" to preserve sender's IP address. Please note that this might not work on multi-node clusters, + #depending on your network settings. + #externalTrafficPolicy: Local + # -- externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. + #These IPs are not managed by Kubernetes. + externalIPs: [] + # -- Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying + #the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerIP: "" + # -- If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer + #will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerSourceRanges: [] + # -- loadBalancerClass is the class of the load balancer implementation this Service belongs to. + #If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or + #"example.com/internal-vip". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. 
+ #If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, + #but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services + #with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. + #This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. + #This field will be wiped when a service is updated to a non 'LoadBalancer' type. + loadBalancerClass: "" + # -- Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. + #Must be ClientIP or None. Defaults to None. More info: + #https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: None + # -- Port of service in Kubernetes cluster + port: 10050 + # -- NodePort port to allocate on each node (only if service.type = NodePort or Loadbalancer) + nodePort: 31050 + # -- Annotations for the zabbix-agent service + annotations: {} + # metallb.universe.tf/address-pool: production-public-ips + # -- If true, agent pods mounts host / at /host/root + hostRootFsMount: true + # -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/agent2/alpine#environment-variables. 
See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml + extraEnv: [] + # -- Additional volumeMounts to the zabbix Agent container + extraVolumeMounts: [] + # -- Annotations to add to the daemonSet + daemonSetAnnotations: {} + # -- Labels to add to the daemonSet + daemonSetLabels: {} + # -- Labels to add to the deployment + deploymentLabels: {} + # -- Annotations to add to the containers + containerAnnotations: {} + # -- Labels to add to the containers + containerLabels: {} + # -- Additional containers to start within the Zabbix Agent pod + extraContainers: [] + # -- Additional init containers to start within the Zabbix Agent pod + extraInitContainers: [] + # -- Additional volumes to make available to the Zabbix Agent pod + extraVolumes: [] + # -- Additional specifications to the Zabbix Agent pod + extraPodSpecs: {} + # -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + # -- The kubelet uses liveness probes to know when to restart a container. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + livenessProbe: + tcpSocket: + # -- Port number/alias name of the container + port: zabbix-agent + timeoutSeconds: 3 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + # -- The kubelet uses readiness probes to know when a container is ready to start accepting traffic. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + readinessProbe: {} + # -- The kubelet uses startup probes to know when a container application has started. 
Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + startupProbe: + tcpSocket: + # -- Port number/alias name of the container + port: zabbix-agent + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 5 + successThreshold: 1 + +# **Zabbix Web** configurations +zabbixWeb: + # -- Enables use of **Zabbix Web** + enabled: true + # -- Number of replicas of ``zabbixWeb`` module + replicaCount: 1 + # -- set permissive podAntiAffinity to spread replicas over cluster nodes if replicaCount>1 + podAntiAffinity: true + # -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + image: + # -- Zabbix Web Docker image name + repository: zabbix/zabbix-web-nginx-pgsql + # -- Zabbix Web Docker image tag, if you want to override zabbixImageTag + tag: null + # -- Pull policy of Docker image + pullPolicy: IfNotPresent + # -- List of dockerconfig secrets names to use when pulling images + pullSecrets: [] + # -- Certificate containing certificates for SAML configuration + #samlCertsSecretName: zabbix-web-samlcerts + service: + # -- Type of service to expose the application. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + #More details: https://kubernetes.io/docs/concepts/services-networking/service/ + type: ClusterIP + # -- clusterIP is the IP address of the service and is usually assigned randomly. + #If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service. + clusterIP: + # -- externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's "externally-facing" addresses + #(NodePorts, ExternalIPs, and LoadBalancer IPs). "Local" to preserve sender's IP address. 
Please note that this might not work on multi-node clusters, + #depending on your network settings. + #externalTrafficPolicy: Local + # -- externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. + #These IPs are not managed by Kubernetes. + externalIPs: [] + # -- Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying + #the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerIP: "" + # -- If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer + #will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerSourceRanges: [] + # -- loadBalancerClass is the class of the load balancer implementation this Service belongs to. + #If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or + #"example.com/internal-vip". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. + #If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, + #but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services + #with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. + #This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. + #This field will be wiped when a service is updated to a non 'LoadBalancer' type. + loadBalancerClass: "" + # -- Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. 
+ #Must be ClientIP or None. Defaults to None. More info: + #https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: None + # -- Port of service in Kubernetes cluster + port: 80 + # -- NodePort port to allocate on each node (only if service.type = NodePort or LoadBalancer) + nodePort: 31080 + # -- Annotations for the Zabbix Web + annotations: {} + # metallb.universe.tf/address-pool: production-public-ips + # -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/web-apache-pgsql/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml + extraEnv: [] + # - name: ZBX_SSO_SETTINGS + # value: '{"baseurl": "https://zabbix.example.com"}' + # - name: ZBX_SERVER_NAME + # value: Demo Zabbix + # -- Annotations to add to the deployment + deploymentAnnotations: {} + # -- Labels to add to the deployment + deploymentLabels: {} + # -- Annotations to add to the containers + containerAnnotations: {} + # -- Labels to add to the containers + containerLabels: {} + # -- Additional volumeMounts to the Zabbix Web container + extraVolumeMounts: [] + # -- Additional containers to start within the Zabbix Web pod + extraContainers: [] + # -- Additional init containers to start within the Zabbix Web pod + extraInitContainers: [] + # -- Additional volumes to make available to the Zabbix Web pod + extraVolumes: [] + # -- Additional specifications to the Zabbix Web pod + extraPodSpecs: {} + # -- Security Context configurations. 
Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + livenessProbe: + httpGet: + # -- Path of health check of application + path: / + # -- Port number/alias name of the container + port: zabbix-web + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + httpGet: + # -- Path of health check of application + path: / + # -- Port number/alias name of the container + port: zabbix-web + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + # -- The kubelet uses startup probes to know when a container application has started. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + startupProbe: {} + +# **Zabbix Web Service** configurations +zabbixWebService: + # -- Enables use of **Zabbix Web Service** + enabled: true + # -- Number of replicas of ``zabbixWebService`` module + replicaCount: 1 + # -- Set permissive podAntiAffinity to spread replicas over cluster nodes if replicaCount>1 + podAntiAffinity: true + # -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + image: + # -- Zabbix Webservice Docker image name + repository: zabbix/zabbix-web-service + # -- Zabbix Webservice Docker image tag, if you want to override zabbixImageTag + tag: null + # -- Pull policy of Docker image + pullPolicy: IfNotPresent + # -- List of dockerconfig secrets names to use when pulling images + pullSecrets: [] + # -- Set the IgnoreURLCertErrors configuration setting of Zabbix Web Service + #ignoreURLCertErrors=1 + service: + # -- Type of service to expose the application. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. 
+ #More details: https://kubernetes.io/docs/concepts/services-networking/service/ + type: ClusterIP + # -- clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, + #is in-range (as per system configuration), and is not in use, it will be allocated to the service. + clusterIP: + # -- Port of service in Kubernetes cluster + port: 10053 + # -- Annotations for the Zabbix Web Service + annotations: {} + # metallb.universe.tf/address-pool: production-public-ips + # -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/web-service/alpine#environment-variables. See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml + extraEnv: [] + # -- Annotations to add to the deployment + deploymentAnnotations: {} + # -- Labels to add to the deployment + deploymentLabels: {} + # -- Annotations to add to the containers + containerAnnotations: {} + # -- Labels to add to the containers + containerLabels: {} + # -- Additional volumeMounts to the Zabbix Web Service container + extraVolumeMounts: [] + # -- Additional containers to start within the Zabbix Web Service pod + extraContainers: [] + # -- Additional init containers to start within the Zabbix Web Service pod + extraInitContainers: [] + # -- Additional volumes to make available to the Zabbix Web Service pod + extraVolumes: [] + # -- Additional specifications to the Zabbix Web Service pod + extraPodSpecs: {} + # -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + # -- The kubelet uses liveness probes to know when to restart a container. 
Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + livenessProbe: {} + # -- The kubelet uses readiness probes to know when a container is ready to start accepting traffic. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + readinessProbe: {} + # -- The kubelet uses startup probes to know when a container application has started. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + startupProbe: {} + +# **Zabbix Java Gateway** configurations +zabbixJavaGateway: + # -- Enables use of **Zabbix Java Gateway** + enabled: false + # -- Number of replicas of ``Zabbix Java Gateway`` module + replicaCount: 1 + # -- Requests and limits of pod resources. See: [https://kubernetes.io/docs/concepts/configuration/manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) + resources: {} + image: + # -- Zabbix Java Gateway Docker image name. + repository: zabbix/zabbix-java-gateway + # -- Zabbix Java Gateway Docker image tag, if you want to override zabbixImageTag + tag: null + # -- Pull policy of Docker image + pullPolicy: IfNotPresent + # -- List of dockerconfig secrets names to use when pulling images + pullSecrets: [] + # -- This variable is specified amount of pollers. By default, value is 5 + ZBX_START_POLLERS: 5 + # -- Name of properties file. Can be used to set additional properties using a key-value format in such a way that they are not visible on a command line or to overwrite existing ones. + # ZBX_PROPERTIES_FILE: + # -- The variable is used to specify debug level, from 0 to 5 + ZBX_DEBUGLEVEL: 3 + # -- This variable is used to specify timeout for outgoing connections. By default, value is 3. + ZBX_TIMEOUT: 3 + # -- Additional arguments for Zabbix Java Gateway. Useful to enable additional libraries and features. 
+ # ZABBIX_OPTIONS: + # Java Gateway Service Name + ZBX_JAVAGATEWAY: zabbix-java-gateway + service: + # -- Type of service to expose the application. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + #More details: https://kubernetes.io/docs/concepts/services-networking/service/ + type: ClusterIP + # -- clusterIP is the IP address of the service and is usually assigned randomly. + #If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service. + clusterIP: + # -- externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's "externally-facing" addresses + #(NodePorts, ExternalIPs, and LoadBalancer IPs). "Local" to preserve sender's IP address. Please note that this might not work on multi-node clusters, + #depending on your network settings. + #externalTrafficPolicy: Local + # -- externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. + #These IPs are not managed by Kubernetes. + externalIPs: [] + # -- Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying + #the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerIP: "" + # -- If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer + #will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. + loadBalancerSourceRanges: [] + # -- loadBalancerClass is the class of the load balancer implementation this Service belongs to. + #If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or + #"example.com/internal-vip". Unprefixed names are reserved for end-users. 
This field can only be set when the Service type is 'LoadBalancer'. + #If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, + #but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services + #with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. + #This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. + #This field will be wiped when a service is updated to a non 'LoadBalancer' type. + loadBalancerClass: "" + # -- Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. + #Must be ClientIP or None. Defaults to None. More info: + #https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + sessionAffinity: None + # -- Port of service in Kubernetes cluster + port: 10052 + # -- NodePort port to allocate on each node (only if service.type = NodePort or Loadbalancer) + nodePort: 31052 + # -- Annotations for the zabbix-java-gateway service + annotations: {} + # metallb.universe.tf/address-pool: production-public-ips + # -- Extra environment variables. A list of additional environment variables. List can be extended with other environment variables listed here: https://github.com/zabbix/zabbix-docker/tree/6.0/Dockerfiles/agent2/alpine#environment-variables. 
See example: https://github.com/zabbix-community/helm-zabbix/blob/master/charts/zabbix/docs/example/kind/values.yaml + extraEnv: [] + # -- Additional volumeMounts to the Zabbix Java Gateway container + extraVolumeMounts: [] + # -- Annotations to add to the deployment + deploymentAnnotations: {} + # -- Labels to add to the deployment + deploymentLabels: {} + # -- Annotations to add to the containers + containerAnnotations: {} + # -- Labels to add to the containers + containerLabels: {} + # -- Additional containers to start within the Zabbix Java Gateway pod + extraContainers: [] + # -- Additional init containers to start within the Zabbix Java Gateway pod + extraInitContainers: [] + # -- Additional volumes to make available to the Zabbix Java Gateway pod + extraVolumes: [] + # -- Additional specifications to the Zabbix Java Gateway pod + extraPodSpecs: {} + # -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: {} + # -- The kubelet uses liveness probes to know when to restart a container. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + livenessProbe: + tcpSocket: + # -- Port number/alias name of the container + port: zabbix-java-gw + timeoutSeconds: 3 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + # -- The kubelet uses readiness probes to know when a container is ready to start accepting traffic. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + readinessProbe: {} + # -- The kubelet uses startup probes to know when a container application has started. 
Reference: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + startupProbe: + tcpSocket: + # -- Port number/alias name of the container + port: zabbix-java-gw + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 5 + successThreshold: 1 + +# Ingress configurations +ingress: + # -- Enables Ingress + enabled: false + # -- Ingress annotations + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # -- Ingress hosts + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + # -- Ingress TLS configuration + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # -- pathType is only for k8s >= 1.18 + pathType: Prefix + +# Ingress CRD object for the Traefik Ingresscontroller +ingressRoute: + # -- Enables Traefik IngressRoute + enabled: false + # -- IngressRoute annotations + annotations: {} + # -- Ingressroute entrypoints + entryPoints: + - websecure + # -- Ingressroute host name + hostName: chart-example.local + # -- IngressRoute TLS configuration + #tls: + # certResolver: myresolver + +route: + # -- Enables Route object for Openshift + enabled: false + # -- Host Name for the route. Can be left empty + hostName: chart-example.local + # -- Openshift Route wildcardPolicy + #wildcardPolicy: + # -- Openshift Route TLS settings + tls: + termination: edge + #insecureEdgeTerminationPolicy: Redirect + # -- Openshift Route extra annotations + annotations: {} + +# -- nodeSelector configurations. Reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +nodeSelector: {} + +# -- Tolerations configurations. 
Reference: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +tolerations: [] + +# -- Affinity configurations. Reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +affinity: {} + +# -- Security Context configurations. Reference: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +securityContext: {} + +karpenter: + # -- Enables support provisioner of Karpenter. Reference: https://karpenter.sh/. + # Tested only using EKS cluster 1.28 in AWS with Karpenter 0.33.0. + enabled: false + # -- Name of cluster. Change the term CHANGE_HERE by EKS cluster name if you want to use Karpenter. + # Example: testing-my-cluster + clusterName: "CHANGE_HERE" + # -- Tag of discovery with name of cluster used by Karpenter. + # Change the term CHANGE_HERE by EKS cluster name if you want to use Karpenter. + # The cluster name, security group and subnets must have this tag. + tag: "karpenter.sh/discovery" + # -- Karpenter adds tags to all resources it creates, including EC2 Instances, EBS volumes, and Launch Templates. + # See details: https://karpenter.sh/v0.33/concepts/nodeclasses/#spectags + resourceTags: + Environment: testing + Scost: zabbix + product: zabbix + # -- Name of instanceProfile EKS cluster. Conflicts with karpenter.role. Must specify one of "role" or "instanceProfile" for Karpenter to launch nodes + # Example: Karpenter-testing-my-cluster-2023120112554517810000001e + instanceProfile: + use: false + name: "CHANGE_HERE" + # -- Name of role EKS cluster. The Karpenter spec.instanceProfile field has been removed from the EC2NodeClass in favor + # of the spec.role field. Karpenter is also removing support for the defaultInstanceProfile specified globally + # in the karpenter-global-settings, making the spec.role field required for all EC2NodeClasses. + # Karpenter will now auto-generate the instance profile in your EC2NodeClass, given the role that you specify. 
+ # If using the Karpenter Getting Started Guide to deploy Karpenter, you can use the karpenter-irsa-$CLUSTER_NAME-$ID role + # provisioned by that process (which is limited to 64 characters). + # Example: karpenter-irsa-testing-my-cluster-2023120421433226760000001e + role: + use: true + name: "CHANGE_HERE" + # -- AMIFamily is a required field, dictating both the default bootstrapping logic for nodes provisioned + # through this EC2NodeClass but also selecting a group of recommended, latest AMIs by default. + # Currently, Karpenter supports amiFamily values AL2, Bottlerocket, Ubuntu, Windows2019, Windows2022 and Custom. + # GPUs are only supported by default with AL2 and Bottlerocket. + # The AL2 amiFamily does not support ARM64 GPU instance type + amiFamily: Bottlerocket + # -- Resource limits constrain the total size of the cluster. + # Limits prevent Karpenter from creating new instances once the limit is exceeded. + limits: + cpu: "2" + memory: "8Gi" + # -- Labels are arbitrary key-values that are applied to all nodes + labels: + karpenter: "true" + app: "zabbix" + # -- Priority given to the NodePool when the scheduler considers which NodePool to select. + # Higher weights indicate higher priority when comparing NodePools. + # Specifying no weight is equivalent to specifying a weight of 0. + weight: 10 + # -- Disruption section which describes the ways in which Karpenter can disrupt and replace Nodes. + # Configuration in this section constrains how aggressive Karpenter can be with performing operations + # like rolling Nodes due to them hitting their maximum lifetime (expiry) or scaling down nodes to reduce cluster cost + disruption: + # -- Describes which types of Nodes Karpenter should consider for consolidation. 
+ # If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost + # If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods + consolidationPolicy: "WhenEmpty" + # -- The amount of time Karpenter should wait after discovering a consolidation decision + # This value can currently only be set when the consolidationPolicy is 'WhenEmpty' + # You can choose to disable consolidation entirely by setting the string value 'Never' here + consolidateAfter: "30s" + # -- The amount of time a Node can live on the cluster before being removed + # Avoiding long-running Nodes helps to reduce security vulnerabilities as well as to reduce the chance of issues that can plague Nodes with long uptimes such as file fragmentation or memory leaks from system processes + # You can choose to disable expiration entirely by setting the string value 'Never' here + expireAfter: "720h" + # -- Requirements that constrain the parameters of provisioned nodes. + # These requirements are combined with pod.spec.topologySpreadConstraints, pod.spec.affinity.nodeAffinity, pod.spec.affinity.podAffinity, and pod.spec.nodeSelector rules. + # Operators { In, NotIn, Exists, DoesNotExist, Gt, and Lt } are supported. 
+ # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#operators + requirements: + - key: "karpenter.k8s.aws/instance-category" + operator: In + values: ["c", "m", "r"] + - key: "karpenter.k8s.aws/instance-cpu" + operator: In + values: ["2", "4", "8", "16", "32"] + - key: "kubernetes.io/arch" + operator: In + values: ["amd64"] + - key: kubernetes.io/os + operator: In + values: ["linux"] + - key: "karpenter.sh/capacity-type" + operator: In + values: ["spot", "on-demand"] + # -- Optional, configures IMDS for the instance + metadataOptions: + httpEndpoint: enabled + httpProtocolIPv6: disabled + httpPutResponseHopLimit: 2 + httpTokens: required + +serviceAccount: + # -- Specifies whether a service account should be created. + create: true + # -- The name of the service account to use. If not set and create is true, a name is generated using the fullname template. + name: "" + # -- Optional additional annotations to add to the Service Account. + annotations: {} + # -- Optional additional labels to add to the Service Account. + labels: {} + # -- Automount API credentials for a Service Account. 
+ automountServiceAccountToken: true + +rbac: + # -- Specifies whether the RBAC resources should be created + create: true + additionalRulesForClusterRole: [] + # - apiGroups: [ "" ] + # resources: + # - nodes/proxy + # verbs: [ "get", "list", "watch" ] From 43acadb097d73cce06004f1325a29db52f7d0f92 Mon Sep 17 00:00:00 2001 From: MatheuslFavaretto Date: Wed, 4 Sep 2024 15:43:51 -0300 Subject: [PATCH 2/6] change values zabbix --- charts/zabbix/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/zabbix/values.yaml b/charts/zabbix/values.yaml index 24ac0c2..b06935a 100644 --- a/charts/zabbix/values.yaml +++ b/charts/zabbix/values.yaml @@ -60,7 +60,7 @@ postgresAccess: # -- Key of the secret used for Postgres Password, requires POSTGRES_PASSWORD_SECRET, defaults to password #passwordSecretKey: "password" # -- Password of database - ignored if passwordSecret is set - password: "zabbix" + password: "Zabbix@1234-" # -- Name of database database: "zabbix" From bc635e0d499212d9b76c4094fdb9020483780a4c Mon Sep 17 00:00:00 2001 From: MatheuslFavaretto Date: Wed, 4 Sep 2024 17:50:12 -0300 Subject: [PATCH 3/6] change values zabbix --- charts/zabbix/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/zabbix/values.yaml b/charts/zabbix/values.yaml index b06935a..6aae887 100644 --- a/charts/zabbix/values.yaml +++ b/charts/zabbix/values.yaml @@ -60,7 +60,7 @@ postgresAccess: # -- Key of the secret used for Postgres Password, requires POSTGRES_PASSWORD_SECRET, defaults to password #passwordSecretKey: "password" # -- Password of database - ignored if passwordSecret is set - password: "Zabbix@1234-" + password: "4jV0K61,C9E6" # -- Name of database database: "zabbix" From 5b76af6540945c177f09453da2fdf890743bcdd5 Mon Sep 17 00:00:00 2001 From: MatheuslFavaretto Date: Wed, 4 Sep 2024 17:56:01 -0300 Subject: [PATCH 4/6] change values zabbix --- charts/zabbix/values.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 
deletion(-) diff --git a/charts/zabbix/values.yaml b/charts/zabbix/values.yaml index 6aae887..d64a150 100644 --- a/charts/zabbix/values.yaml +++ b/charts/zabbix/values.yaml @@ -60,7 +60,10 @@ postgresAccess: # -- Key of the secret used for Postgres Password, requires POSTGRES_PASSWORD_SECRET, defaults to password #passwordSecretKey: "password" # -- Password of database - ignored if passwordSecret is set - password: "4jV0K61,C9E6" + # ggignore + + password: "zabbix" + # -- Name of database database: "zabbix" From 8149ee4d337162e01f30830f8e1193d08ffde2ad Mon Sep 17 00:00:00 2001 From: MatheuslFavaretto Date: Sun, 8 Sep 2024 23:37:48 -0300 Subject: [PATCH 5/6] feat: add poc-harbor and change README.md --- Makefile | 13 +- README.md | 237 ++++++++- charts/harbor/values.yaml | 1058 +++++++++++++++++++++++++++++++++++++ 3 files changed, 1297 insertions(+), 11 deletions(-) create mode 100644 charts/harbor/values.yaml diff --git a/Makefile b/Makefile index 5fd10c1..3852730 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: create delete create-argocd upgrade-argocd delete-argocd create-metrics-server upgrade-metrics-server delte-metrics-server +.PHONY: create delete create-argocd upgrade-argocd delete-argocd create-metrics-server upgrade-metrics-server delete-metrics-server create-prometheus-stack upgrade-prometheus-stack delete-prometheus-stack create-jaeger delete-jaeger create-odigos delete-odigos create-prometheus-adapter delete-prometheus-adapter create-zabbix delete-zabbix create-harbor delete-harbor create: kind create cluster --config config.yaml @@ -13,7 +13,7 @@ upgrade-argocd: helm upgrade argocd argo/argo-cd -n argocd -f charts/argocd/values.yaml delete-argocd: - helm uninstall argocd -n argocd --delete-namespace + helm uninstall argocd -n argocd && kubectl delete namespace argocd create-metrics-server: helm install metrics-server metrics-server/metrics-server -n kube-system -f charts/metrics-server/values.yaml @@ -31,7 +31,7 @@ 
upgrade-prometheus-stack: helm upgrade prometheus-stack prometheus-community/kube-prometheus-stack -n prometheus -f charts/prometheus-stack/values.yaml delete-prometheus-stack: - helm uninstall prometheus-stack -n prometheus --delete-namespace + helm uninstall prometheus-stack -n prometheus && kubectl delete namespace prometheus create-jaeger: kubectl apply -f manifests/jaeger/values.yaml @@ -49,7 +49,7 @@ create-prometheus-adapter: helm install prometheus-adapter prometheus-community/prometheus-adapter -n custom-metrics --create-namespace -f charts/prometheus-adapter/values.yaml delete-prometheus-adapter: - helm uninstall prometheus-adapter -n custom-metrics --delete-namespace + helm uninstall prometheus-adapter -n custom-metrics && kubectl delete namespace custom-metrics create-zabbix: helm install my-zabbix-test zabbix-community/zabbix --version 5.0.1 -n zabbix --create-namespace -f charts/zabbix/values.yaml @@ -57,3 +57,8 @@ create-zabbix: delete-zabbix: helm uninstall my-zabbix-test -n zabbix && kubectl delete namespace zabbix +create-harbor: + helm install poc-harbor harbor/harbor -n harbor --create-namespace -f charts/harbor/values.yaml + +delete-harbor: + helm uninstall poc-harbor -n harbor && kubectl delete namespace harbor diff --git a/README.md b/README.md index a43aa38..1d4657c 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,236 @@ -# Kind kubernetes cluster +# Kind Kubernetes Cluster -Inspired by [msfidelis](https://github.com/msfidelis/kubernetes-kind-setup), I decided to create my own version of a Kubernetes cluster using [kind](https://kind.sigs.k8s.io/). +This project is designed to present and test my Proof of Concepts (PoCs) and studies related to Kubernetes tools and configurations. 
-## Creating a custom cluster +## Prerequisites -`make create` +- [Docker](https://www.docker.com/) +- [Kind](https://kind.sigs.k8s.io/) +- [Helm](https://helm.sh/) -## Deleting the cluster +## Overview -`make delete` +This project leverages **Kind** (Kubernetes IN Docker) to create a local Kubernetes cluster for testing various tools like ArgoCD, Prometheus, Zabbix, and more. All operations can be executed via the Makefile. -## Creating argocd +## Commands Overview + +### 1. Creating a Custom Cluster + +To create a local Kubernetes cluster using Kind with a predefined configuration: + +```bash +make create +``` + +This command uses the `config.yaml` file to provision the cluster with specific configurations, such as node settings, port mappings, etc. + +### 2. Deleting the Cluster + +To remove the existing Kubernetes cluster: + +```bash +make delete +``` + +This command deletes the Kind cluster, cleaning up all the resources created in the process. + +### 3. Creating ArgoCD + +To install **ArgoCD** into the Kubernetes cluster: + +```bash +make create-argocd +``` + +This installs ArgoCD using Helm with the custom values provided in `charts/argocd/values.yaml`, creating the `argocd` namespace if it doesn't exist. + +### 4. Upgrading ArgoCD + +To upgrade the existing ArgoCD installation: + +```bash +make upgrade-argocd +``` + +This command upgrades the ArgoCD installation with the latest version or any changes specified in the `charts/argocd/values.yaml`. + +### 5. Deleting ArgoCD + +To uninstall ArgoCD from the cluster: + +```bash +make delete-argocd +``` + +This removes ArgoCD and deletes the `argocd` namespace from the cluster. + +### 6. Creating the Metrics Server + +To install the **Metrics Server**: + +```bash +make create-metrics-server +``` + +This deploys the Metrics Server using Helm and custom values from `charts/metrics-server/values.yaml` into the `kube-system` namespace. + +### 7. 
Upgrading the Metrics Server + +To upgrade the Metrics Server: + +```bash +make upgrade-metrics-server +``` + +This command upgrades the Metrics Server with any new configurations or updates specified in the `charts/metrics-server/values.yaml`. + +### 8. Deleting the Metrics Server + +To uninstall the Metrics Server: + +```bash +make delete-metrics-server +``` + +This command removes the Metrics Server from the `kube-system` namespace. + +### 9. Creating Prometheus Stack + +To install the **Prometheus Stack** for monitoring: + +```bash +make create-prometheus-stack +``` + +This installs the Prometheus Stack (Prometheus, Alertmanager, Grafana, etc.) using Helm with the values from `charts/prometheus-stack/values.yaml`, creating the `prometheus` namespace. + +### 10. Upgrading Prometheus Stack + +To upgrade the Prometheus Stack: + +```bash +make upgrade-prometheus-stack +``` + +This upgrades the Prometheus Stack with any changes or updates specified in `charts/prometheus-stack/values.yaml`. + +### 11. Deleting Prometheus Stack + +To uninstall the Prometheus Stack: + +```bash +make delete-prometheus-stack +``` + +This command removes Prometheus and deletes the `prometheus` namespace. + +### 12. Creating Jaeger + +To deploy **Jaeger** for distributed tracing: + +```bash +make create-jaeger +``` + +This applies the Jaeger deployment from the `manifests/jaeger/values.yaml`. + +### 13. Deleting Jaeger + +To delete the Jaeger deployment: + +```bash +make delete-jaeger +``` + +This removes the Jaeger deployment. + +### 14. Creating Odigos + +To install **Odigos**: + +```bash +make create-odigos +``` + +This command applies the Odigos deployment defined in `labs/odigos/values.yaml`. + +### 15. Deleting Odigos + +To delete the Odigos deployment: + +```bash +make delete-odigos +``` + +This command removes the Odigos deployment from the cluster. + +### 16. 
Creating Prometheus Adapter
+
+To install the **Prometheus Adapter** for custom metrics:
+
+```bash
+make create-prometheus-adapter
+```
+
+This installs the Prometheus Adapter using Helm with the values from `charts/prometheus-adapter/values.yaml`, creating the `custom-metrics` namespace.
+
+### 17. Deleting Prometheus Adapter
+
+To uninstall the Prometheus Adapter:
+
+```bash
+make delete-prometheus-adapter
+```
+
+This command removes the Prometheus Adapter and deletes the `custom-metrics` namespace.
+
+### 18. Creating Zabbix
+
+To install **Zabbix**:
+
+```bash
+make create-zabbix
+```
+
+This installs Zabbix using Helm with the values from `charts/zabbix/values.yaml`, creating the `zabbix` namespace.
+
+### 19. Deleting Zabbix
+
+To uninstall Zabbix:
+
+```bash
+make delete-zabbix
+```
+
+This removes Zabbix and deletes the `zabbix` namespace.
+
+### 20. Creating Harbor
+
+To install **Harbor** for container registry:
+
+```bash
+make create-harbor
+```
+
+This installs Harbor using Helm with the values from `charts/harbor/values.yaml`, creating the `harbor` namespace.
+
+### 21. Deleting Harbor
+
+To uninstall Harbor:
+
+```bash
+make delete-harbor
+```
+
+This removes Harbor and deletes the `harbor` namespace.
+
+## Conclusion
+
+This Makefile simplifies the deployment, management, and deletion of various Kubernetes tools and services in a local Kind cluster environment. Feel free to modify the values and configurations in the charts for your specific use cases.
diff --git a/charts/harbor/values.yaml b/charts/harbor/values.yaml
new file mode 100644
index 0000000..ab8b62d
--- /dev/null
+++ b/charts/harbor/values.yaml
@@ -0,0 +1,1058 @@
+expose:
+  # Set how to expose the service. 
Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer" + # and fill the information in the corresponding section + type: ingress + tls: + # Enable TLS or not. + # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress" + # Note: if the "expose.type" is "ingress" and TLS is disabled, + # the port must be included in the command when pulling/pushing images. + # Refer to https://github.com/goharbor/harbor/issues/5291 for details. + enabled: true + # The source of the tls certificate. Set as "auto", "secret" + # or "none" and fill the information in the corresponding section + # 1) auto: generate the tls certificate automatically + # 2) secret: read the tls certificate from the specified secret. + # The tls certificate can be generated manually or by cert manager + # 3) none: configure no tls certificate for the ingress. If the default + # tls certificate is configured in the ingress controller, choose this option + certSource: auto + auto: + # The common name used to generate the certificate, it's necessary + # when the type isn't "ingress" + commonName: "" + secret: + # The name of secret which contains keys named: + # "tls.crt" - the certificate + # "tls.key" - the private key + secretName: "" + ingress: + hosts: + core: core.harbor.domain + # set to the type of ingress controller if it has specific requirements. + # leave as `default` for most ingress controllers. 
+ # set to `gce` if using the GCE ingress controller + # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller + # set to `alb` if using the ALB ingress controller + # set to `f5-bigip` if using the F5 BIG-IP ingress controller + controller: default + ## Allow .Capabilities.KubeVersion.Version to be overridden while creating ingress + kubeVersionOverride: "" + className: "" + annotations: + # note different ingress controllers may require a different ssl-redirect annotation + # for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below + ingress.kubernetes.io/ssl-redirect: "true" + ingress.kubernetes.io/proxy-body-size: "0" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/proxy-body-size: "0" + # ingress-specific labels + labels: {} + clusterIP: + # The name of ClusterIP service + name: harbor + # The ip address of the ClusterIP service (leave empty for acquiring dynamic ip) + staticClusterIP: "" + ports: + # The service port Harbor listens on when serving HTTP + httpPort: 80 + # The service port Harbor listens on when serving HTTPS + httpsPort: 443 + # Annotations on the ClusterIP service + annotations: {} + # ClusterIP-specific labels + labels: {} + nodePort: + # The name of NodePort service + name: harbor + ports: + http: + # The service port Harbor listens on when serving HTTP + port: 80 + # The node port Harbor listens on when serving HTTP + nodePort: 30002 + https: + # The service port Harbor listens on when serving HTTPS + port: 443 + # The node port Harbor listens on when serving HTTPS + nodePort: 30003 + # Annotations on the nodePort service + annotations: {} + # nodePort-specific labels + labels: {} + loadBalancer: + # The name of LoadBalancer service + name: harbor + # Set the IP if the LoadBalancer supports assigning IP + IP: "" + ports: + # The service port Harbor listens on when serving HTTP + httpPort: 80 + # The service port Harbor listens on when serving HTTPS 
+ httpsPort: 443 + # Annotations on the loadBalancer service + annotations: {} + # loadBalancer-specific labels + labels: {} + sourceRanges: [] + +# The external URL for Harbor core service. It is used to +# 1) populate the docker/helm commands showed on portal +# 2) populate the token service URL returned to docker client +# +# Format: protocol://domain[:port]. Usually: +# 1) if "expose.type" is "ingress", the "domain" should be +# the value of "expose.ingress.hosts.core" +# 2) if "expose.type" is "clusterIP", the "domain" should be +# the value of "expose.clusterIP.name" +# 3) if "expose.type" is "nodePort", the "domain" should be +# the IP address of k8s node +# +# If Harbor is deployed behind the proxy, set it as the URL of proxy +externalURL: https://core.harbor.domain + +# The persistence is enabled by default and a default StorageClass +# is needed in the k8s cluster to provision volumes dynamically. +# Specify another StorageClass in the "storageClass" or set "existingClaim" +# if you already have existing persistent volumes to use +# +# For storing images and charts, you can also use "azure", "gcs", "s3", +# "swift" or "oss". Set it in the "imageChartStorage" section +persistence: + enabled: true + # Setting it to "keep" to avoid removing PVCs during a helm delete + # operation. Leaving it empty will delete PVCs after the chart deleted + # (this does not apply for PVCs that are created for internal database + # and redis components, i.e. they are never deleted automatically) + resourcePolicy: "keep" + persistentVolumeClaim: + registry: + # Use the existing PVC which must be created manually before bound, + # and specify the "subPath" if the PVC is shared with other components + existingClaim: "" + # Specify the "storageClass" used to provision the volume. Or the default + # StorageClass will be used (the default). 
+ # Set it to "-" to disable dynamic provisioning + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + annotations: {} + jobservice: + jobLog: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 1Gi + annotations: {} + # If external database is used, the following settings for database will + # be ignored + database: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 1Gi + annotations: {} + # If external Redis is used, the following settings for Redis will + # be ignored + redis: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 1Gi + annotations: {} + trivy: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + annotations: {} + # Define which storage backend is used for registry to store + # images and charts. Refer to + # https://github.com/distribution/distribution/blob/main/docs/content/about/configuration.md#storage + # for the detail. + imageChartStorage: + # Specify whether to disable `redirect` for images and chart storage, for + # backends which not supported it (such as using minio for `s3` storage type), please disable + # it. To disable redirects, simply set `disableredirect` to `true` instead. + # Refer to + # https://github.com/distribution/distribution/blob/main/docs/configuration.md#redirect + # for the detail. + disableredirect: false + # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate. + # The secret must contain keys named "ca.crt" which will be injected into the trust store + # of registry's containers. + # caBundleSecretName: + + # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift", + # "oss" and fill the information needed in the corresponding section. 
The type + # must be "filesystem" if you want to use persistent volumes for registry + type: filesystem + filesystem: + rootdirectory: /storage + #maxthreads: 100 + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + #realm: core.windows.net + # To use existing secret, the key must be AZURE_STORAGE_ACCESS_KEY + existingSecret: "" + gcs: + bucket: bucketname + # The base64 encoded json file which contains the key + encodedkey: base64-encoded-json-key-file + #rootdirectory: /gcs/object/name/prefix + #chunksize: "5242880" + # To use existing secret, the key must be GCS_KEY_DATA + existingSecret: "" + useWorkloadIdentity: false + s3: + # Set an existing secret for S3 accesskey and secretkey + # keys in the secret should be REGISTRY_STORAGE_S3_ACCESSKEY and REGISTRY_STORAGE_S3_SECRETKEY for registry + #existingSecret: "" + region: us-west-1 + bucket: bucketname + #accesskey: awsaccesskey + #secretkey: awssecretkey + #regionendpoint: http://myobjects.local + #encrypt: false + #keyid: mykeyid + #secure: true + #skipverify: false + #v4auth: true + #chunksize: "5242880" + #rootdirectory: /s3/object/name/prefix + #storageclass: STANDARD + #multipartcopychunksize: "33554432" + #multipartcopymaxconcurrency: 100 + #multipartcopythresholdsize: "33554432" + swift: + authurl: https://storage.myprovider.com/v3/auth + username: username + password: password + container: containername + # keys in existing secret must be REGISTRY_STORAGE_SWIFT_PASSWORD, REGISTRY_STORAGE_SWIFT_SECRETKEY, REGISTRY_STORAGE_SWIFT_ACCESSKEY + existingSecret: "" + #region: fr + #tenant: tenantname + #tenantid: tenantid + #domain: domainname + #domainid: domainid + #trustid: trustid + #insecureskipverify: false + #chunksize: 5M + #prefix: + #secretkey: secretkey + #accesskey: accesskey + #authversion: 3 + #endpointtype: public + #tempurlcontainerkey: false + #tempurlmethods: + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: 
regionname + bucket: bucketname + # key in existingSecret must be REGISTRY_STORAGE_OSS_ACCESSKEYSECRET + existingSecret: "" + #endpoint: endpoint + #internal: false + #encrypt: false + #secure: true + #chunksize: 10M + #rootdirectory: rootdirectory + +# The initial password of Harbor admin. Change it from portal after launching Harbor +# or give an existing secret for it +# key in secret is given via (default to HARBOR_ADMIN_PASSWORD) +# existingSecretAdminPassword: +existingSecretAdminPasswordKey: HARBOR_ADMIN_PASSWORD +harborAdminPassword: "Harbor12345" + +# The internal TLS used for harbor components secure communicating. In order to enable https +# in each component tls cert files need to provided in advance. +internalTLS: + # If internal TLS enabled + enabled: false + # enable strong ssl ciphers (default: false) + strong_ssl_ciphers: false + # There are three ways to provide tls + # 1) "auto" will generate cert automatically + # 2) "manual" need provide cert file manually in following value + # 3) "secret" internal certificates from secret + certSource: "auto" + # The content of trust ca, only available when `certSource` is "manual" + trustCa: "" + # core related cert configuration + core: + # secret name for core's tls certs + secretName: "" + # Content of core's TLS cert file, only available when `certSource` is "manual" + crt: "" + # Content of core's TLS key file, only available when `certSource` is "manual" + key: "" + # jobservice related cert configuration + jobservice: + # secret name for jobservice's tls certs + secretName: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + key: "" + # registry related cert configuration + registry: + # secret name for registry's tls certs + secretName: "" + # Content of registry's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of registry's TLS key 
file, only available when `certSource` is "manual" + key: "" + # portal related cert configuration + portal: + # secret name for portal's tls certs + secretName: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + key: "" + # trivy related cert configuration + trivy: + # secret name for trivy's tls certs + secretName: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + key: "" + +ipFamily: + # ipv6Enabled set to true if ipv6 is enabled in cluster, currently it affected the nginx related component + ipv6: + enabled: true + # ipv4Enabled set to true if ipv4 is enabled in cluster, currently it affected the nginx related component + ipv4: + enabled: true + +imagePullPolicy: IfNotPresent + +# Use this set to assign a list of default pullSecrets +imagePullSecrets: +# - name: docker-registry-secret +# - name: internal-registry-secret + +# The update strategy for deployments with persistent volumes(jobservice, registry): "RollingUpdate" or "Recreate" +# Set it as "Recreate" when "RWM" for volumes isn't supported +updateStrategy: + type: RollingUpdate + +# debug, info, warning, error or fatal +logLevel: info + +# The name of the secret which contains key named "ca.crt". Setting this enables the +# download link on portal to download the CA certificate when the certificate isn't +# generated automatically +caSecretName: "" + +# The secret key used for encryption. Must be a string of 16 chars. 
+secretKey: "not-a-secure-key" +# If using existingSecretSecretKey, the key must be secretKey +existingSecretSecretKey: "" + +# The proxy settings for updating trivy vulnerabilities from the Internet and replicating +# artifacts from/to the registries that cannot be reached directly +proxy: + httpProxy: + httpsProxy: + noProxy: 127.0.0.1,localhost,.local,.internal + components: + - core + - jobservice + - trivy + +# Run the migration job via helm hook +enableMigrateHelmHook: false + +# The custom ca bundle secret, the secret must contain key named "ca.crt" +# which will be injected into the trust store for core, jobservice, registry, trivy components +# caBundleSecretName: "" + +## UAA Authentication Options +# If you're using UAA for authentication behind a self-signed +# certificate you will need to provide the CA Cert. +# Set uaaSecretName below to provide a pre-created secret that +# contains a base64 encoded CA Certificate named `ca.crt`. +# uaaSecretName: + +metrics: + enabled: false + core: + path: /metrics + port: 8001 + registry: + path: /metrics + port: 8001 + jobservice: + path: /metrics + port: 8001 + exporter: + path: /metrics + port: 8001 + ## Create prometheus serviceMonitor to scrape harbor metrics. + ## This requires the monitoring.coreos.com/v1 CRD. Please see + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md + ## + serviceMonitor: + enabled: false + additionalLabels: {} + # Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: "" + # Metric relabel configs to apply to samples before ingestion. + metricRelabelings: + [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + # Relabel configs to apply to samples before ingestion. 
+ relabelings: + [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +trace: + enabled: false + # trace provider: jaeger or otel + # jaeger should be 1.26+ + provider: jaeger + # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth + sample_rate: 1 + # namespace used to differentiate different harbor services + # namespace: + # attributes is a key value dict contains user defined attributes used to initialize trace provider + # attributes: + # application: harbor + jaeger: + # jaeger supports two modes: + # collector mode(uncomment endpoint and uncomment username, password if needed) + # agent mode(uncomment agent_host and agent_port) + endpoint: http://hostname:14268/api/traces + # username: + # password: + # agent_host: hostname + # export trace data by jaeger.thrift in compact mode + # agent_port: 6831 + otel: + endpoint: hostname:4318 + url_path: /v1/traces + compression: false + insecure: true + # timeout is in seconds + timeout: 10 + +# cache layer configurations +# if this feature enabled, harbor will cache the resource +# `project/project_metadata/repository/artifact/manifest` in the redis +# which help to improve the performance of high concurrent pulling manifest. +cache: + # default is not enabled. + enabled: false + # default keep cache for one day. 
+ expireHours: 24 + +## set Container Security Context to comply with PSP restricted policy if necessary +## each of the conatiner will apply the same security context +## containerSecurityContext:{} is initially an empty yaml that you could edit it on demand, we just filled with a common template for convenience +containerSecurityContext: + privileged: false + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + runAsNonRoot: true + capabilities: + drop: + - ALL + +# If service exposed via "ingress", the Nginx will not be used +nginx: + image: + repository: goharbor/nginx-photon + tag: v2.11.1 + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## The priority class to run the pod as + priorityClassName: + +portal: + image: + repository: goharbor/harbor-portal + tag: v2.11.1 + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # 
whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## Additional service annotations + serviceAnnotations: {} + ## The priority class to run the pod as + priorityClassName: + # containers to be run before the controller's container starts. + initContainers: [] + # Example: + # + # - name: wait + # image: busybox + # command: [ 'sh', '-c', "sleep 20" ] + +core: + image: + repository: goharbor/harbor-core + tag: v2.11.1 + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + ## Startup probe values + startupProbe: + enabled: true + initialDelaySeconds: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## Additional service annotations + serviceAnnotations: {} + ## The priority class to run the pod as + priorityClassName: + # containers to be run before the controller's container starts. + initContainers: [] + # Example: + # + # - name: wait + # image: busybox + # command: [ 'sh', '-c', "sleep 20" ] + ## User settings configuration json string + configureUserSettings: + # The provider for updating project quota(usage), there are 2 options, redis or db. + # By default it is implemented by db but you can configure it to redis which + # can improve the performance of high concurrent pushing to the same project, + # and reduce the database connections spike and occupies. 
+ # Using redis will bring up some delay for quota usage updation for display, so only + # suggest switch provider to redis if you were ran into the db connections spike around + # the scenario of high concurrent pushing to same project, no improvment for other scenes. + quotaUpdateProvider: db # Or redis + # Secret is used when core server communicates with other components. + # If a secret key is not specified, Helm will generate one. Alternatively set existingSecret to use an existing secret + # Must be a string of 16 chars. + secret: "" + # Fill in the name of a kubernetes secret if you want to use your own + # If using existingSecret, the key must be secret + existingSecret: "" + # Fill the name of a kubernetes secret if you want to use your own + # TLS certificate and private key for token encryption/decryption. + # The secret must contain keys named: + # "tls.key" - the private key + # "tls.crt" - the certificate + secretName: "" + # If not specifying a preexisting secret, a secret can be created from tokenKey and tokenCert and used instead. + # If none of secretName, tokenKey, and tokenCert are specified, an ephemeral key and certificate will be autogenerated. + # tokenKey and tokenCert must BOTH be set or BOTH unset. + # The tokenKey value is formatted as a multiline string containing a PEM-encoded RSA key, indented one more than tokenKey on the following line. + tokenKey: | + # If tokenKey is set, the value of tokenCert must be set as a PEM-encoded certificate signed by tokenKey, and supplied as a multiline string, indented one more than tokenCert on the following line. + tokenCert: | + # The XSRF key. Will be generated automatically if it isn't specified + xsrfKey: "" + # If using existingSecret, the key is defined by core.existingXsrfSecretKey + existingXsrfSecret: "" + # If using existingSecret, the key + existingXsrfSecretKey: CSRF_KEY + # The time duration for async update artifact pull_time and repository + # pull_count, the unit is second. 
Will be 10 seconds if it isn't set. + # eg. artifactPullAsyncFlushDuration: 10 + artifactPullAsyncFlushDuration: + gdpr: + deleteUser: false + auditLogsCompliant: false + +jobservice: + image: + repository: goharbor/harbor-jobservice + tag: v2.11.1 + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## The priority class to run the pod as + priorityClassName: + # containers to be run before the controller's container starts. + initContainers: [] + # Example: + # + # - name: wait + # image: busybox + # command: [ 'sh', '-c', "sleep 20" ] + maxJobWorkers: 10 + # The logger for jobs: "file", "database" or "stdout" + jobLoggers: + - file + # - database + # - stdout + # The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`) + loggerSweeperDuration: 14 #days + notification: + webhook_job_max_retry: 3 + webhook_job_http_client_timeout: 3 # in seconds + reaper: + # the max time to wait for a task to finish, if unfinished after max_update_hours, the task will be mark as error, but the task will continue to run, default value is 24 + max_update_hours: 24 + # the max time for execution in running state without new task created + max_dangling_hours: 168 + # Secret is used when job service communicates with other components. + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. 
+ secret: "" + # Use an existing secret resource + existingSecret: "" + # Key within the existing secret for the job service secret + existingSecretKey: JOBSERVICE_SECRET + +registry: + registry: + image: + repository: goharbor/registry-photon + tag: v2.11.1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + controller: + image: + repository: goharbor/harbor-registryctl + tag: v2.11.1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## The priority class to run the pod as + priorityClassName: + # containers to be run before the controller's container starts. + initContainers: [] + # Example: + # + # - name: wait + # image: busybox + # command: [ 'sh', '-c', "sleep 20" ] + # Secret is used to secure the upload state from client + # and registry storage backend. + # See: https://github.com/distribution/distribution/blob/main/docs/configuration.md#http + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. + secret: "" + # Use an existing secret resource + existingSecret: "" + # Key within the existing secret for the registry service secret + existingSecretKey: REGISTRY_HTTP_SECRET + # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. 
+ relativeurls: false + credentials: + username: "harbor_registry_user" + password: "harbor_registry_password" + # If using existingSecret, the key must be REGISTRY_PASSWD and REGISTRY_HTPASSWD + existingSecret: "" + # Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt. + # htpasswdString: $apr1$XLefHzeG$Xl4.s00sMSCCcMyJljSZb0 # example string + htpasswdString: "" + middleware: + enabled: false + type: cloudFront + cloudFront: + baseurl: example.cloudfront.net + keypairid: KEYPAIRID + duration: 3000s + ipfilteredby: none + # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key + # that allows access to CloudFront + privateKeySecret: "my-secret" + # enable purge _upload directories + upload_purging: + enabled: true + # remove files in _upload directories which exist for a period of time, default is one week. 
+ age: 168h + # the interval of the purge operations + interval: 24h + dryrun: false + +trivy: + # enabled the flag to enable Trivy scanner + enabled: true + image: + # repository the repository for Trivy adapter image + repository: goharbor/trivy-adapter-photon + # tag the tag for Trivy adapter image + tag: v2.11.1 + # set the service account to be used, default if left empty + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + # replicas the number of Pod replicas + replicas: 1 + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 1 + memory: 1Gi + extraEnvVars: [] + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + ## The priority class to run the pod as + priorityClassName: + # containers to be run before the controller's container starts. + initContainers: [] + # Example: + # + # - name: wait + # image: busybox + # command: [ 'sh', '-c', "sleep 20" ] + # debugMode the flag to enable Trivy debug mode with more verbose scanning log + debugMode: false + # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`. + vulnType: "os,library" + # severity a comma-separated list of severities to be checked + severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL" + # ignoreUnfixed the flag to display only fixed vulnerabilities + ignoreUnfixed: false + # insecure the flag to skip verifying registry certificate + insecure: false + # gitHubToken the GitHub access token to download Trivy DB + # + # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases. 
+ # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached + # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update + # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one. + # Currently, the database is updated every 12 hours and published as a new release to GitHub. + # + # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough + # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000 + # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult + # https://developer.github.com/v3/#rate-limiting + # + # You can create a GitHub token by following the instructions in + # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line + gitHubToken: "" + # skipUpdate the flag to disable Trivy DB downloads from GitHub + # + # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues. + # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the + # `/home/scanner/.cache/trivy/db/trivy.db` path. + skipUpdate: false + # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the + # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path + # + skipJavaDBUpdate: false + # The offlineScan option prevents Trivy from sending API requests to identify dependencies. + # + # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it. 
+ # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
  # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
  # It would work if all the dependencies are available locally.
  # This option doesn't affect DB download. You need to specify skipUpdate as well as offlineScan in an air-gapped environment.
  offlineScan: false
  # Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
  securityCheck: "vuln"
  # The duration to wait for scan completion
  timeout: 5m0s

database:
  # if external database is used, set "type" to "external"
  # and fill the connection information in "external" section
  type: internal
  internal:
    image:
      repository: goharbor/harbor-db
      tag: v2.11.1
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    # The timeout used in livenessProbe; 1 to 5 seconds
    livenessProbe:
      timeoutSeconds: 1
    # The timeout used in readinessProbe; 1 to 5 seconds
    readinessProbe:
      timeoutSeconds: 1
    extraEnvVars: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    # containers to be run before the controller's container starts.
+ extrInitContainers: [] + # Example: + # + # - name: wait + # image: busybox + # command: [ 'sh', '-c', "sleep 20" ] + # The initial superuser password for internal database + password: "changeit" + # The size limit for Shared memory, pgSQL use it for shared_buffer + # More details see: + # https://github.com/goharbor/harbor/issues/15034 + shmSizeLimit: 512Mi + initContainer: + migrator: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + permissions: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + external: + host: "192.168.0.1" + port: "5432" + username: "user" + password: "password" + coreDatabase: "registry" + # if using existing secret, the key must be "password" + existingSecret: "" + # "disable" - No SSL + # "require" - Always SSL (skip verification) + # "verify-ca" - Always SSL (verify that the certificate presented by the + # server was signed by a trusted CA) + # "verify-full" - Always SSL (verify that the certification presented by the + # server was signed by a trusted CA and the server host name matches the one + # in the certificate) + sslmode: "disable" + # The maximum number of connections in the idle connection pool per pod (core+exporter). + # If it <=0, no idle connections are retained. + maxIdleConns: 100 + # The maximum number of open connections to the database per pod (core+exporter). + # If it <= 0, then there is no limit on the number of open connections. + # Note: the default number of connections is 1024 for harbor's postgres. 
+ maxOpenConns: 900
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}

redis:
  # if external Redis is used, set "type" to "external"
  # and fill the connection information in "external" section
  type: internal
  internal:
    image:
      repository: goharbor/redis-photon
      tag: v2.11.1
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    # resources:
    #   requests:
    #     memory: 256Mi
    #     cpu: 100m
    extraEnvVars: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    # containers to be run before the controller's container starts.
    initContainers: []
    # Example:
    #
    # - name: wait
    #   image: busybox
    #   command: [ 'sh', '-c', "sleep 20" ]
    # # jobserviceDatabaseIndex defaults to "1"
    # # registryDatabaseIndex defaults to "2"
    # # trivyAdapterIndex defaults to "5"
    # # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
    # # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    trivyAdapterIndex: "5"
    # harborDatabaseIndex: "6"
    # cacheLayerDatabaseIndex: "7"
  external:
    # support redis, redis+sentinel
    # addr for redis: <host_redis>:<port_redis>
    # addr for redis+sentinel: <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
    addr: "192.168.0.2:6379"
    # The name of the set of Redis instances to monitor, it must be set to support redis+sentinel
    sentinelMasterSet: ""
    # The "coreDatabaseIndex" must be "0" as the library Harbor
    # used doesn't support configuring it
    # harborDatabaseIndex defaults to "0", but it can be configured to "6", this config is optional
    # cacheLayerDatabaseIndex defaults to "0", but it can be configured to "7", this config is optional
    coreDatabaseIndex: "0"
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: 
"2" + trivyAdapterIndex: "5" + # harborDatabaseIndex: "6" + # cacheLayerDatabaseIndex: "7" + # username field can be an empty string, and it will be authenticated against the default user + username: "" + password: "" + # If using existingSecret, the key must be REDIS_PASSWORD + existingSecret: "" + ## Additional deployment annotations + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + +exporter: + image: + repository: goharbor/harbor-exporter + tag: v2.11.1 + serviceAccountName: "" + # mount the service account token + automountServiceAccountToken: false + replicas: 1 + revisionHistoryLimit: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + extraEnvVars: [] + podAnnotations: {} + ## Additional deployment labels + podLabels: {} + nodeSelector: {} + tolerations: [] + affinity: {} + # Spread Pods across failure-domains like regions, availability zones or nodes + topologySpreadConstraints: [] + ## The priority class to run the pod as + priorityClassName: + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # nodeTaintsPolicy: Honor + # whenUnsatisfiable: DoNotSchedule + cacheDuration: 23 + cacheCleanInterval: 14400 From 78b7fcb2f114a9e798ac59bcc5c0020fe01cdcf7 Mon Sep 17 00:00:00 2001 From: Matheus Favaretto Date: Mon, 9 Sep 2024 09:13:57 -0300 Subject: [PATCH 6/6] Update README.md --- README.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/README.md b/README.md index 1d4657c..b194a19 100644 --- a/README.md +++ b/README.md @@ -223,14 +223,3 @@ make delete-harbor ``` This removes Harbor and deletes the `harbor` namespace. - -## Conclusion - -This Makefile simplifies the deployment, management, and deletion of various Kubernetes tools and services in a local Kind cluster environment. Feel free to modify the values and configurations in the charts for your specific use cases. -""" - -file_path = "/mnt/data/README.mdx" -with open(file_path, "w") as file: - file.write(mdx_content) - -file_path