diff --git a/ansible/inventory/openstack-flex/inventory.yaml.example b/ansible/inventory/openstack-flex/inventory.yaml.example index 359850f3..b6426509 100644 --- a/ansible/inventory/openstack-flex/inventory.yaml.example +++ b/ansible/inventory/openstack-flex/inventory.yaml.example @@ -75,7 +75,7 @@ all: children: k8s_cluster: vars: - cluster_name: rackerlabs.dev.local # This clustername should be changed to match your environment domain name. + cluster_name: cluster.local # If cluster_name is modified, cluster_domain_suffix will also need to be modified for all the helm charts and for the infrastructure operator configs. kube_ovn_iface: vlan206 # see the netplan snippet in etc/netplan/default-DHCP.yaml for more info. kube_ovn_default_interface_name: vlan206 # see the netplan snippet in etc/netplan/default-DHCP.yaml for more info. kube_ovn_central_hosts: "{{ groups['ovn_network_nodes'] }}" diff --git a/docs/build-test-envs.md b/docs/build-test-envs.md index a00100af..1f948f31 100644 --- a/docs/build-test-envs.md +++ b/docs/build-test-envs.md @@ -1,8 +1,10 @@ # Lab Build Demo -[![asciicast](https://asciinema.org/a/629776.svg)](https://asciinema.org/a/629776) +!!! Example "This section is only for test environments" + + The information on this page is only needed when building an environment in Virtual Machines. -The information on this page is only needed when building an environment in Virtual Machines. +[![asciicast](https://asciinema.org/a/629776.svg)](https://asciinema.org/a/629776) ## Prerequisites @@ -12,7 +14,7 @@ Take a moment to orient yourself, there are a few items to consider before movin !!! note - Your local genestack repository will be transferred to the eventual launcher instance for convenience **perfect for development**. See [Getting Started](quickstart.md) for an example on how to recursively clone the repository and its submodules. + Your local genestack repository will be transferred to the eventual launcher instance for convenience, **perfect for development**. See [Getting Started](genestack-getting-started.md) for an example on how to recursively clone the repository and its submodules. ### Create a VirtualEnv diff --git a/docs/quickstart.md b/docs/genestack-getting-started.md similarity index 97% rename from docs/quickstart.md rename to docs/genestack-getting-started.md index 359b3a4c..24abef62 100644 --- a/docs/quickstart.md +++ b/docs/genestack-getting-started.md @@ -1,4 +1,4 @@ -# Quick Start Guide +# Getting the Genestack Repository Before you can do anything, we need to get the code. Because we've sold our soul to the submodule devil, you're going to need to recursively clone the repo into your location. diff --git a/docs/index.md b/docs/index.md index 18d80d8a..063333b3 100644 --- a/docs/index.md +++ b/docs/index.md @@ -40,7 +40,7 @@ hide: Start building now. - [:octicons-play-24: Deployment Guide](quickstart.md) + [:octicons-play-24: Deployment Guide](genestack-getting-started.md) diff --git a/docs/infrastructure-gateway-api.md b/docs/infrastructure-gateway-api.md index 87147cdc..2faeac0c 100644 --- a/docs/infrastructure-gateway-api.md +++ b/docs/infrastructure-gateway-api.md @@ -39,7 +39,7 @@ kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/downloa Next, install the NGINX Gateway Fabric controller ``` -cd /opt/genestack/submodules/nginx-gateway-fabric +cd /opt/genestack/submodules/nginx-gateway-fabric/deploy/helm-chart helm upgrade --install nginx-gateway-fabric . 
--namespace=nginx-gateway -f /opt/genestack/helm-configs/nginx-gateway-fabric/helm-overrides.yaml ``` @@ -50,6 +50,12 @@ Helm install does not automatically upgrade the crds for this resource. To upgra In this example, we will look at how the Prometheus UI is exposed through the gateway. For other services, see the gateway kustomization file for the service. +Rackspace-specific gateway kustomization files can be applied like so: +``` +cd /opt/genestack/kustomize/gateway +kubectl kustomize | kubectl apply -f - +``` + + First, create the shared gateway and then the httproute resource for prometheus. ``` apiVersion: gateway.networking.k8s.io/v1 diff --git a/docs/infrastructure-letsencrypt.md b/docs/infrastructure-letsencrypt.md index 29850f4d..3e1995aa 100644 --- a/docs/infrastructure-letsencrypt.md +++ b/docs/infrastructure-letsencrypt.md @@ -32,12 +32,9 @@ EOF ## Use the proper TLS issuerRef !!! danger "Important for later helm installations!" - You must ensure your helm configuration is such that you set the - `endpoints.$service.host_fqdn_override.public.tls.issuerRef.name` for any - given endpoint to use our `letsencrypt-prod` ClusterIssuer. Similarly, - ensure that `endpoints.$service.host_fqdn_override.public.host` - is set to the external DNS hostname you plan to expose for a given - service endpoint. + The `letsencrypt-prod` ClusterIssuer is used to generate the certificate through cert-manager. This ClusterIssuer is applied using a Kustomize patch. However, to ensure that the certificate generation process is initiated, it is essential to include `endpoints.$service.host_fqdn_override.public.tls: {}` in the service helm override file. + Similarly, ensure that `endpoints.$service.host_fqdn_override.public.host` is set to the external DNS hostname you plan to expose for a given service endpoint. + This configuration is necessary for proper certificate generation and to ensure the service is accessible via the specified hostname. !!! example You can find several examples of this in the @@ -48,11 +45,7 @@ EOF image: host_fqdn_override: public: - tls: - secretName: glance-tls-api - issuerRef: - name: letsencrpyt-prod - kind: ClusterIssuer + tls: {} host: glance.api.your.domain.tld port: api: diff --git a/docs/infrastructure-libvirt.md b/docs/infrastructure-libvirt.md index d3485f67..7daf2b21 100644 --- a/docs/infrastructure-libvirt.md +++ b/docs/infrastructure-libvirt.md @@ -9,15 +9,5 @@ kubectl kustomize --enable-helm /opt/genestack/kustomize/libvirt | kubectl apply Once deployed, you can validate functionality on your compute hosts with `virsh`: ``` shell -root@openstack-flex-node-3:~# virsh -Welcome to virsh, the virtualization interactive terminal. - -Type: 'help' for help with commands - 'quit' to quit - -virsh # list - Id Name State --------------------- - -virsh # +kubectl exec -it $(kubectl get pods -l application=libvirt -o=jsonpath='{.items[0].metadata.name}' -n openstack) -n openstack -- virsh list ``` diff --git a/docs/infrastructure-ovn.md b/docs/infrastructure-ovn.md deleted file mode 100644 index 74371926..00000000 --- a/docs/infrastructure-ovn.md +++ /dev/null @@ -1,6 +0,0 @@ -# Deploy Open vSwitch OVN - -Note that we're not deploying Openvswitch, however, we are using it. The implementation on Genestack is assumed to be -done with Kubespray which deploys OVN as its networking solution. Because those components are handled by our infrastructure -there's nothing for us to manage / deploy in this environment. 
OpenStack will leverage OVN within Kubernetes following the -scaling/maintenance/management practices of kube-ovn. diff --git a/docs/k8s-kubespray.md b/docs/k8s-kubespray.md index 398fcdf9..16d26392 100644 --- a/docs/k8s-kubespray.md +++ b/docs/k8s-kubespray.md @@ -140,23 +140,3 @@ ansible-playbook --inventory /etc/genestack/inventory/openstack-flex-inventory.i Given the use of a venv, when running with `sudo`, be sure to use the full path and pass through your environment variables; `sudo -E /home/ubuntu/.venvs/genestack/bin/ansible-playbook`. Once the cluster is online, you can run `kubectl` to interact with the environment. - -## Installing Kubernetes - -Currently only the k8s provider kubespray is supported and included as submodule into the code base. -A default inventory file for kubespray is provided at `/etc/genestack/inventory` and must be modified. - -!!! tip - - Existing OpenStack Ansible inventory can be converted using the `/opt/genestack/scripts/convert_osa_inventory.py` - script which provides a `hosts.yml` - -Once the inventory is updated and configuration altered (networking etc), the Kubernetes cluster can be initialized with -the `setup-kubernetes.yml` playbook which in addition will also label nodes for OpenStack installation. - -``` shell -source /opt/genestack/scripts/genestack.rc -cd /opt/genestack/ansible/playbooks - -ansible-playbook setup-kubernetes.yml -``` diff --git a/docs/openstack-floating-ips.md b/docs/openstack-floating-ips.md index 37dc0fea..f92a87c8 100644 --- a/docs/openstack-floating-ips.md +++ b/docs/openstack-floating-ips.md @@ -86,3 +86,210 @@ To remove the floating IP address from a project: ``` shell openstack floating ip delete FLOATING_IP_ADDRESS ``` + +#### Floating IP Example + +Below is a quick example of how to assign floating IPs. + +You will need to get your cloud name from your clouds.yaml; you will find it underneath the "clouds:" key. More information on this can be found [here](build-test-envs.md). + +First, create a floating IP from either PUBLICNET or the public IP pool. + +``` shell +openstack --os-cloud={cloud_name} floating ip create PUBLICNET +``` + +Second, get the cloud server UUID. + +``` shell +openstack --os-cloud={cloud_name} server list +``` + +Third, add the floating IP to the server. + +``` shell +openstack --os-cloud={cloud_name} server add floating ip {cloud_server_uuid} {floating_ip} +``` + +#### Shared floating IP and virtual IP + +You can often use a load balancer instead of a shared floating IP or virtual IP. +For advanced networking needs, such as an instance doing the kind of work you +might do with a network appliance operating system, you may need a real shared +floating IP that two instances can share with something like _keepalived_, but +you should probably use a load balancer unless you actually need the additional +capabilities of a shared floating IP or virtual IP. + +In _Genestack_ Flex, with OVN, you can implement a shared floating IP mostly as +standard for OpenStack. However, Neutron's `allowed-address-pairs` behavior depends on your +Neutron plugin, _ML2/OVN_ in this case. While most OpenStack documentation +will show altering `allowed-address-pairs` with a CIDR, as seen +[here](https://docs.openstack.org/neutron/latest/admin/archives/introduction.html#allowed-address-pairs), +OVN doesn't support CIDRs in its equivalent of port security on logical switch +ports in its NB database, so you have to use a single IP address instead of +a CIDR. 
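+ +To make that caveat concrete, here is a quick sketch; the `$PORT_UUID` placeholder and the RFC 5737 documentation address 192.0.2.10 below are made up for illustration: + +``` shell +# Generic Neutron docs often show the CIDR form, which OVN's port security does not support: +# openstack port set --allowed-address ip-address=192.0.2.0/24 $PORT_UUID +# With ML2/OVN, specify a single IP address instead: +openstack port set --allowed-address ip-address=192.0.2.10 $PORT_UUID +```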
+ +With that caveat, you can set up a shared floating IP like this: + +1. Create a Neutron network + + ``` shell + openstack network create tester-network + ``` + +2. Create a subnet for the network + + ``` shell + openstack subnet create --network tester-network --subnet-range 192.168.0.0/24 tester-subnet + ``` + +3. Create servers on the network + + ``` shell + openstack server create tester1 --flavor m1.tiny --key-name keypair --network tester-network --image $IMAGE_UUID + openstack server create tester2 --flavor m1.tiny --key-name keypair --network tester-network --image $IMAGE_UUID + ``` + +4. Create a port with a fixed IP for the VIP. + + ``` shell + openstack port create --fixed-ip subnet=tester-subnet \ + --network tester-network --no-security-group tester-vip-port + ``` + + You will probably want to note the IP on the port here as your VIP. + +5. Create a router + + You will typically need a router with an external gateway to use any + public IP, depending on your configuration. + + ``` + openstack router create tester-router + ``` + +6. Add an external Internet gateway to the router + + At Rackspace, we usually call the public Internet network for instances + PUBLICNET. You can use the name or ID that provides external networks + for your own installation. + + ``` + openstack router set --external-gateway PUBLICNET tester-router + ``` + +7. Add the subnet to the router + + ``` shell + openstack router add subnet tester-router tester-subnet + ``` + +8. Create a floating IP for the port + + You can't do this step until you've created the router as above, because + Neutron requires reachability between the subnet for the port and the + floating IP for the network. If you followed in order, this should work + here. + + ``` shell + openstack floating ip create --port tester-vip-port PUBLICNET + ``` + + Note and retain the ID and/or IP returned, since you will need it for the + next step. + +9. Put the floating IP in the `allowed-address-pairs` list of the ports for your + two instances. + + Here, **specify only the VIP IP address** and **omit the netmask**. This deviates + from other examples you may see, which may include a netmask, because it can + vary with details of the plugin used with Neutron. For Neutron with ML2/OVN, + you only specify the IP address here, without a netmask. + + You use the private VIP because the DNAT occurs before it reaches the + instances. + + ``` + openstack port list --server tester1 # retrieve port UUID + openstack port list --server tester2 # retrieve port UUID + openstack port set --allowed-address ip-address= + openstack port set --allowed-address ip-address= + ``` + +The steps above complete creating the shared floating IP and VIP. The following +steps allow you to test it. + +1. Create a bastion server. + + With the two test instances connected to a subnet on a router with an + external gateway, they can reach the Internet, but you will probably need + a server with a floating IP to reach these two servers to install and + configure _keepalived_ and test your shared floating IP / VIP. This example + shows only a test. + + ``` shell + openstack server create tester-bastion --flavor m1.tiny \ + --key-name keypair --network tester-network --image $IMAGE_UUID + ``` + +2. Add a floating IP to the bastion server. + + You can specify the UUID or IP of the floating IP. + + ``` + openstack server add floating ip tester-bastion \ + 8a991c65-24c6-4125-a9c8-38d15e851c78 + ``` + +3. 
Alter security group rules to allow SSH and ICMP: + + You will likely find you can't SSH to the floating IP you added to the + instance unless you've altered your default security group or taken other + steps, because the default security group will prevent all ingress traffic. + + We also add ICMP here for testing. + + ``` + openstack security group rule create --proto tcp --dst-port 22 \ + --remote-ip 0.0.0.0/0 default + openstack security group rule create --proto icmp --dst-port -1 default + ``` + +4. SSH to the first test instance from the bastion. + +5. Configure the VIP on the interface as a test on the first test instance: + + ``` + sudo ip address add /24 dev enp3s0 + ``` + + Note that you add the internal VIP here, not the floating public IP. Use + the appropriate netmask (usually /24, unless you picked something else). + +6. Ping the floating IP. + + Ping should now work. For a general floating IP on the Internet, you can + usually ping from any location, so you don't necessarily have to use your + bastion. + + ``` shell + ping + ``` + + Since the ports for the two servers look almost identical, if it works on + one, it should work on the other, so you can delete the IP from the first + instance and try it on the second: + + ``` shell + sudo ip address del /24 dev enp3s0 + ``` + + You may need to ping the internal IP address from your bastion server or + take other steps to take care of the ARP caches. You can use arping on + the instance with the VIP for that: + + ``` shell + sudo arping -i enp3s0 -U -S # VIP twice + ``` + + then break out of it (^C) once ping starts working with the address. diff --git a/docs/openstack-servers.md b/docs/openstack-servers.md index ac4fd597..419411db 100644 --- a/docs/openstack-servers.md +++ b/docs/openstack-servers.md @@ -91,3 +91,49 @@ Please visit the OpenStack Snapshot page [here](openstack-snapshot.md). # Launch a server from a volume Please visit the OpenStack Volumes page [here](openstack-volumes.md). + +# Server Creation Example + +Below is a quick example of how one could set up a server. + +You will need to get your cloud name from your clouds.yaml; you will find it underneath the "clouds:" key. More information on this can be found [here](build-test-envs.md). + +First, we are going to create our network, "my_network". + +``` shell +openstack --os-cloud={cloud_name} network create my_network +``` + +Second, create the subnet, "my_subnet". + +``` shell +openstack --os-cloud={cloud_name} subnet create --ip-version 4 --subnet-range {cidr range} --network my_network my_subnet +``` + +Third, create the router, "my_router". + +``` shell +openstack --os-cloud={cloud_name} router create my_router +``` + +Fourth, add "my_subnet" to "my_router" and set the router's external gateway using PUBLICNET to allow outbound network access. + +``` shell +openstack --os-cloud={cloud_name} router add subnet my_router my_subnet + +openstack --os-cloud={cloud_name} router set --external-gateway PUBLICNET my_router +``` + +Fifth, gather the UUIDs for our image, flavor, and network to create our server. + +``` shell +openstack --os-cloud={cloud_name} image list +openstack --os-cloud={cloud_name} flavor list +openstack --os-cloud={cloud_name} network list +``` + +Lastly, create your server! 
+ +``` shell +openstack --os-cloud={cloud_name} server create --flavor {flavor uuid} --image {image uuid} --boot-from-volume 25 --network {network uuid} my_first_server +``` diff --git a/docs/sealed-secrets.md b/docs/sealed-secrets.md index e46d4dae..7c635cd9 100644 --- a/docs/sealed-secrets.md +++ b/docs/sealed-secrets.md @@ -1,5 +1,9 @@ -# Sealed Secrets Introduction and Installation Guide +!!! Danger "This section is still under development and experimental" + + None of these components are required to run a Genestack environment. + +# Sealed Secrets Introduction and Installation Guide Sealed Secrets is a Kubernetes-native solution for securely storing and managing sensitive information within Kubernetes Secrets. It ensures secure secret management by encrypting Kubernetes Secrets and storing them as SealedSecret resources, which can only be decrypted by the cluster itself. diff --git a/docs/storage-swift-object-store.md b/docs/storage-swift-object-store.md index f2630bc0..60e0adea 100644 --- a/docs/storage-swift-object-store.md +++ b/docs/storage-swift-object-store.md @@ -4,12 +4,6 @@ Use the command-line utility `swift` to perform operations on your object store. -## Requirements - -!!! note - - FIXME: TBD - ## Swift client documentation ``` shell @@ -83,10 +77,6 @@ Verify the container's configuration: swift stat flex-container01 ``` -!!! example - - FIXME: Example coming soon! - ### Upload files to the container Upload the entire contents of a folder to the container: diff --git a/docs/vault-secrets-operator.md b/docs/vault-secrets-operator.md index 43279f99..e9552211 100644 --- a/docs/vault-secrets-operator.md +++ b/docs/vault-secrets-operator.md @@ -1,3 +1,7 @@ +!!! Danger "This section is still under development and experimental" + + None of the vault components are required to run a Genestack environment. + # HashiCorp Vault Secret Operators for Genestack Installation The Vault Secrets Operator (VSO) enables Pods to seamlessly consume Vault secrets from Kubernetes Secrets. This guide outlines the process of consuming secrets stored in Vault for a Genestack installation. This is a continuation of [vault.md](https://docs.rackspacecloud.com/vault/), where we created a few secrets in Vault. diff --git a/docs/vault.md b/docs/vault.md index 8be0fbde..da46bc11 100644 --- a/docs/vault.md +++ b/docs/vault.md @@ -1,3 +1,7 @@ +!!! Danger "This section is still under development and experimental" + + None of the vault components are required to run a Genestack environment. + # HashiCorp Vault Setup for Genestack Installation HashiCorp Vault is a versatile tool designed for secret management and data protection. It allows you to securely store and control access to various sensitive data, such as tokens, passwords, certificates, and API keys. In this guide, we will use HashiCorp Vault to store Kubernetes Secrets for the Genestack installation. 
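+ +As a quick, illustrative sketch of what that looks like with the Vault CLI (the `secret/genestack/demo` mount and path are placeholders, not the paths this guide creates later): + +``` shell +# Write a throwaway key/value pair into the KV v2 secrets engine +vault kv put secret/genestack/demo password='example-password' +# Read it back to confirm the write +vault kv get secret/genestack/demo +```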
diff --git a/helm-configs/cinder/cinder-helm-overrides.yaml b/helm-configs/cinder/cinder-helm-overrides.yaml index 96da2506..e3b90f20 100644 --- a/helm-configs/cinder/cinder-helm-overrides.yaml +++ b/helm-configs/cinder/cinder-helm-overrides.yaml @@ -791,6 +791,7 @@ conf: internal_project_name: internal_cinder internal_user_name: internal_cinder rootwrap_config: /etc/cinder/rootwrap.conf + use_multipath_for_image_xfer: False # Add Cinder multipath support for image xfer database: max_retries: -1 keystone_authtoken: @@ -813,7 +814,25 @@ conf: oslo_middleware: enable_proxy_headers_parsing: true oslo_messaging_rabbit: - rabbit_ha_queues: true + # We define use of quorum queues via kustomize but this was enabling HA queues instead + # ha_queues are deprecated, explicitly set to false and set quorum_queue true + rabbit_ha_queues: false + rabbit_quorum_queue: true + # TODO: Not available until 2024.1, but once it is, we want to enable these! + # new feature ref: https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html + # rabbit_transient_quorum_queue: true + # use_queue_manager: true + # Reconnect after a node outage more quickly + rabbit_interval_max: 10 + # Send more frequent heartbeats and fail unhealthy nodes faster + # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 + # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + # Setting a lower kombu_reconnect_delay should resolve the issue with HA failing when one node is down + # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html + # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 + kombu_reconnect_delay: 0.5 coordination: backend_url: file:///var/lib/cinder/coordination service_user: @@ -880,19 +899,7 @@ conf: format: "%(message)s" datefmt: "%Y-%m-%d %H:%M:%S" rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "cinder" - name: "ha_ttl_cinder" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '^(?!(amq\.|reply_)).*' + policies: [] backends: # Those options will be written to backends.conf as-is. lvmdriver-1: diff --git a/helm-configs/glance/glance-helm-overrides.yaml b/helm-configs/glance/glance-helm-overrides.yaml index 474a295d..fd643429 100644 --- a/helm-configs/glance/glance-helm-overrides.yaml +++ b/helm-configs/glance/glance-helm-overrides.yaml @@ -267,7 +267,25 @@ conf: oslo_messaging_notifications: driver: messagingv2 oslo_messaging_rabbit: - rabbit_ha_queues: true + # We define use of quorum queues via kustomize but this was enabling HA queues instead + # ha_queues are deprecated, explicitly set to false and set quorum_queue true + rabbit_ha_queues: false + rabbit_quorum_queue: true + # TODO: Not available until 2024.1, but once it is, we want to enable these! 
+ # new feature ref: https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html + # rabbit_transient_quorum_queue: true + # use_queue_manager: true + # Reconnect after a node outage more quickly + rabbit_interval_max: 10 + # Send more frequent heartbeats and fail unhealthy nodes faster + # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 + # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + # Setting a lower kombu_reconnect_delay should resolve the issue with HA failing when one node is down + # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html + # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 + kombu_reconnect_delay: 0.5 oslo_policy: policy_file: /etc/glance/policy.yaml cors: {} @@ -361,19 +379,7 @@ conf: user_domain_id = {{- end -}} rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "glance" - name: "ha_ttl_glance" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '^(?!(amq\.|reply_)).*' + policies: [] network: api: diff --git a/helm-configs/heat/heat-helm-overrides.yaml b/helm-configs/heat/heat-helm-overrides.yaml index 8f30f670..04875b92 100644 --- a/helm-configs/heat/heat-helm-overrides.yaml +++ b/helm-configs/heat/heat-helm-overrides.yaml @@ -371,7 +371,25 @@ conf: oslo_middleware: enable_proxy_headers_parsing: true oslo_messaging_rabbit: - rabbit_ha_queues: True + # We define use of quorum queues via kustomize but this was enabling HA queues instead + # ha_queues are deprecated, explicitly set to false and set quorum_queue true + rabbit_ha_queues: false + rabbit_quorum_queue: true + # TODO: Not available until 2024.1, but once it is, we want to enable these! 
+ # new feature ref: https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html + # rabbit_transient_quorum_queue: true + # use_queue_manager: true + # Reconnect after a node outage more quickly + rabbit_interval_max: 10 + # Send more frequent heartbeats and fail unhealthy nodes faster + # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 + # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + # Setting a lower kombu_reconnect_delay should resolve the issue with HA failing when one node is down + # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html + # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 + kombu_reconnect_delay: 0.5 oslo_policy: policy_file: /etc/heat/policy.yaml api_audit_map: @@ -463,19 +481,7 @@ conf: datefmt: "%Y-%m-%d %H:%M:%S" rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "heat" - name: "ha_ttl_heat" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '^(?!(amq\.|reply_)).*' + policies: [] network: api: diff --git a/helm-configs/keystone/keystone-helm-overrides.yaml b/helm-configs/keystone/keystone-helm-overrides.yaml index 7661ee14..54834567 100644 --- a/helm-configs/keystone/keystone-helm-overrides.yaml +++ b/helm-configs/keystone/keystone-helm-overrides.yaml @@ -523,7 +523,25 @@ conf: oslo_messaging_notifications: driver: messagingv2 oslo_messaging_rabbit: - rabbit_ha_queues: true + # We define use of quorum queues via kustomize but this was enabling HA queues instead + # ha_queues are deprecated, explicitly set to false and set quorum_queue true + rabbit_ha_queues: false + rabbit_quorum_queue: true + # TODO: Not available until 2024.1, but once it is, we want to enable these! 
+ # new feature ref: https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html + # rabbit_transient_quorum_queue: true + # use_queue_manager: true + # Reconnect after a node outage more quickly + rabbit_interval_max: 10 + # Send more frequent heartbeats and fail unhealthy nodes faster + # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 + # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + # Setting a lower kombu_reconnect_delay should resolve the issue with HA failing when one node is down + # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html + # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 + kombu_reconnect_delay: 0.5 oslo_middleware: enable_proxy_headers_parsing: true oslo_policy: @@ -546,19 +564,7 @@ conf: policy: {} access_rules: {} rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "keystone" - name: "ha_ttl_keystone" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '^(?!(amq\.|reply_)).*' + policies: [] rally_tests: run_tempest: false tests: diff --git a/helm-configs/neutron/neutron-helm-overrides.yaml b/helm-configs/neutron/neutron-helm-overrides.yaml index 7176b670..77d67366 100644 --- a/helm-configs/neutron/neutron-helm-overrides.yaml +++ b/helm-configs/neutron/neutron-helm-overrides.yaml @@ -1793,7 +1793,25 @@ conf: oslo_messaging_notifications: driver: messagingv2 oslo_messaging_rabbit: - rabbit_ha_queues: true + # We define use of quorum queues via kustomize but this was enabling HA queues instead + # ha_queues are deprecated, explicitly set to false and set quorum_queue true + rabbit_ha_queues: false + rabbit_quorum_queue: true + # TODO: Not available until 2024.1, but once it is, we want to enable these! + # new feature ref: https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html + # rabbit_transient_quorum_queue: true + # use_queue_manager: true + # Reconnect after a node outage more quickly + rabbit_interval_max: 10 + # Send more frequent heartbeats and fail unhealthy nodes faster + # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 + # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + # Setting a lower kombu_reconnect_delay should resolve the issue with HA failing when one node is down + # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html + # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 + kombu_reconnect_delay: 0.5 oslo_middleware: enable_proxy_headers_parsing: true oslo_policy: @@ -2019,19 +2037,7 @@ conf: bgp_dragent: {} rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "neutron" - name: "ha_ttl_neutron" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '^(?!(amq\.|reply_)).*' + policies: [] ## NOTE: "besteffort" is meant for dev env with mixed compute type only. 
## This helps prevent sriov init script from failing due to mis-matched NIC ## For prod env, target NIC should match and init script should fail otherwise. diff --git a/helm-configs/nova/nova-helm-overrides.yaml b/helm-configs/nova/nova-helm-overrides.yaml index c95ecd45..7cadd6a0 100644 --- a/helm-configs/nova/nova-helm-overrides.yaml +++ b/helm-configs/nova/nova-helm-overrides.yaml @@ -1437,7 +1437,7 @@ conf: disk_cachemodes: "network=writeback" hw_disk_discard: unmap cpu_mode: host-passthrough - iscsi_use_multipath: false # Disabled because multipathd is not configured or running + volume_use_multipath: false # Disabled because multipathd is not configured or running upgrade_levels: compute: auto cache: @@ -1448,7 +1448,25 @@ conf: oslo_messaging_notifications: driver: messagingv2 oslo_messaging_rabbit: - rabbit_ha_queues: true + # We define use of quorum queues via kustomize but this was enabling HA queues instead + # ha_queues are deprecated, explicitly set to false and set quorum_queue true + rabbit_ha_queues: false + rabbit_quorum_queue: true + # TODO: Not available until 2024.1, but once it is, we want to enable these! + # new feature ref: https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html + # rabbit_transient_quorum_queue: true + # use_queue_manager: true + # Reconnect after a node outage more quickly + rabbit_interval_max: 10 + # Send more frequent heartbeats and fail unhealthy nodes faster + # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 + # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 + heartbeat_rate: 3 + heartbeat_timeout_threshold: 30 + # Setting a lower kombu_reconnect_delay should resolve the issue with HA failing when one node is down + # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html + # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 + kombu_reconnect_delay: 0.5 os_vif_ovs: ovsdb_connection: tcp:127.0.0.1:6640 placement: @@ -1527,19 +1545,7 @@ conf: format: "%(message)s" datefmt: "%Y-%m-%d %H:%M:%S" rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "nova" - name: "ha_ttl_nova" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '^(?!(amq\.|reply_)).*' + policies: [] enable_iscsi: false archive_deleted_rows: purge_deleted_rows: false diff --git a/helm-configs/prod-example-openstack-overrides.yaml b/helm-configs/prod-example-openstack-overrides.yaml index 1d90681b..5dbcc581 100644 --- a/helm-configs/prod-example-openstack-overrides.yaml +++ b/helm-configs/prod-example-openstack-overrides.yaml @@ -103,11 +103,7 @@ endpoints: compute: host_fqdn_override: public: - tls: - secretName: nova-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: nova.dfw-ospcv2-staging.ohthree.com port: api: @@ -117,11 +113,7 @@ endpoints: compute_metadata: host_fqdn_override: public: - tls: - secretName: metadata-tls-metadata - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: metadata.nova.dfw-ospcv2-staging.ohthree.com port: metadata: @@ -131,11 +123,7 @@ endpoints: compute_novnc_proxy: host_fqdn_override: public: - tls: - secretName: nova-novncproxy-tls-proxy - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: novnc.nova.dfw-ospcv2-staging.ohthree.com port: novnc_proxy: @@ 
-145,11 +133,7 @@ endpoints: cloudformation: host_fqdn_override: public: - tls: - secretName: heat-tls-cfn - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: cloudformation.heat.dfw-ospcv2-staging.ohthree.com port: api: @@ -159,11 +143,7 @@ endpoints: cloudwatch: host_fqdn_override: public: - tls: - secretName: heat-tls-cloudwatch - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: cloudwatch.heat.dfw-ospcv2-staging.ohthree.com port: api: @@ -173,11 +153,7 @@ endpoints: dashboard: host_fqdn_override: public: - tls: - secretName: horizon-tls-web - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: dfw-ospcv2-staging.ohthree.com port: web: @@ -210,11 +186,7 @@ endpoints: region_name: *region host_fqdn_override: public: - tls: - secretName: keystone-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: keystone.dfw-ospcv2-staging.ohthree.com port: api: @@ -228,11 +200,7 @@ endpoints: image: host_fqdn_override: public: - tls: - secretName: glance-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: glance.dfw-ospcv2-staging.ohthree.com port: api: @@ -242,11 +210,7 @@ endpoints: load_balancer: host_fqdn_override: public: - tls: - secretName: octavia-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: octavia.dfw-ospcv2-staging.ohthree.com port: api: @@ -256,11 +220,7 @@ endpoints: network: host_fqdn_override: public: - tls: - secretName: neutron-tls-server - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: neutron.dfw-ospcv2-staging.ohthree.com port: api: @@ -270,11 +230,7 @@ endpoints: orchestration: host_fqdn_override: public: - tls: - secretName: heat-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: heat.dfw-ospcv2-staging.ohthree.com port: api: @@ -284,11 +240,7 @@ endpoints: placement: host_fqdn_override: public: - tls: - secretName: placement-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: placement.dfw-ospcv2-staging.ohthree.com port: api: @@ -298,11 +250,7 @@ endpoints: volume: host_fqdn_override: public: - tls: - secretName: cinder-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: cinder.dfw-ospcv2-staging.ohthree.com port: api: @@ -312,11 +260,7 @@ endpoints: volumev2: host_fqdn_override: public: - tls: - secretName: cinder-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: cinder.dfw-ospcv2-staging.ohthree.com port: api: @@ -326,11 +270,7 @@ endpoints: volumev3: host_fqdn_override: public: - tls: - secretName: cinder-tls-api - issuerRef: - name: letsencrypt-prod - kind: ClusterIssuer + tls: {} host: cinder.dfw-ospcv2-staging.ohthree.com port: api: diff --git a/kustomize/ingress/external/helm/ingress-helm-overrides.yaml b/kustomize/ingress/external/helm/ingress-helm-overrides.yaml index 9b5fb159..c32f6b7c 100644 --- a/kustomize/ingress/external/helm/ingress-helm-overrides.yaml +++ b/kustomize/ingress/external/helm/ingress-helm-overrides.yaml @@ -1,350 +1,1104 @@ -deployment: - mode: namespace - type: Deployment - cluster: - class: "nginx-openstack" - ingressClassByName: false - controllerClass: "k8s.io/nginx-ingress" - -images: - tags: - entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - ingress: registry.k8s.io/ingress-nginx/controller:v1.10.1 - ingress_module_init: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy" - ingress_routed_vip: 
"docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy" - error_pages: registry.k8s.io/defaultbackend:1.4 - keepalived: docker.io/osixia/keepalived:1.4.5 - dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - image_repo_sync: docker.io/library/docker:17.07.0 - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -pod: - security_context: - error_pages: - pod: - runAsUser: 65534 - container: - ingress_error_pages: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - server: - pod: - runAsUser: 65534 - container: - ingress_vip_kernel_modules: - capabilities: - add: - - SYS_MODULE - readOnlyRootFilesystem: true - runAsUser: 0 - ingress_vip_init: - capabilities: - add: - - NET_ADMIN - readOnlyRootFilesystem: true - runAsUser: 0 - ingress: - readOnlyRootFilesystem: false - runAsUser: 101 - ingress_vip: - capabilities: - add: - - NET_ADMIN - readOnlyRootFilesystem: true - runAsUser: 0 - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: openstack-control-plane - operator: In - values: - - enabled - tolerations: - ingress: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - dns_policy: "ClusterFirstWithHostNet" - replicas: - ingress: 3 - error_page: 2 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - termination_grace_period: - server: - timeout: 60 - error_pages: - timeout: 60 - resources: +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +nameOverride: "ingress-nginx" +# fullnameOverride: + +# -- Override the deployment namespace; defaults to .Release.Namespace +namespaceOverride: "" +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + enableAnnotationValidations: false + image: + ## Keep false as default for now! 
+ chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.10.1" + digest: sha256:e24f39d3eed6bcc239a56f20098878845f62baa34b9f2be2fd2c38ce9fb0f29e + digestChroot: sha256:c155954116b397163c88afcb3252462771bd7867017e8a17623e83601bab7ac7 + pullPolicy: IfNotPresent + runAsNonRoot: true + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + readOnlyRootFilesystem: false + # -- Use an existing PSP instead of creating one + existingPsp: "" + # -- Configures the controller container name + containerName: controller + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + # -- Optionally customize the pod hostAliases. + hostAliases: [] + # - ip: 127.0.0.1 + # hostnames: + # - foo.local + # - bar.local + # - ip: 10.1.2.3 + # hostnames: + # - foo.remote + # - bar.remote + # -- Optionally customize the pod hostname. + hostname: {} + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-mode="auto" + # Defaults to false + enableTopologyAwareRouting: false + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. 
+ # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: false + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not enabled: false - ingress: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - error_pages: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - jobs: - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -labels: - server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - error_server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -network: - host_namespace: false - vip: - manage: false - # what type of vip manage machanism will be used - # possible options: routed, keepalived - mode: routed - interface: ingress-vip - addr: 172.18.0.1/32 - keepalived_router_id: 100 - # Use .network.vip.addr as an external IP for the service - # Useful if the CNI or provider can set up routes, etc. - assign_as_external_ip: false - ingressClass: - spec: - controller: null - ingress: - spec: - ingressClassName: null - node_port: - enabled: false - http_port: 30080 - https_port: 30443 - annotations: - # NOTE(portdirect): if left blank this is populated from - # .deployment.cluster.class - nginx.ingress.kubernetes.io/proxy-body-size: "0" - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_headers "X-Content-Type-Options: nosniff"; - more_set_headers "X-Frame-Options: deny"; - more_set_headers "X-Permitted-Cross-Domain-Policies: none"; - more_set_headers "Content-Security-Policy: script-src 'self'"; - external_policy_local: false - -dependencies: - dynamic: - common: - local_image_registry: - jobs: - - ingress-image-repo-sync - services: - - endpoint: node - service: local_image_registry - static: - error_pages: - jobs: null - ingress: - jobs: null - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - -monitoring: - prometheus: + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + # NetworkPolicy for controller component. + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader' + electionID: "" + # -- This section refers to the creation of the IngressClass resource. + # IngressClasses are immutable and cannot be changed after creation. + # We do not support namespaced IngressClasses, yet, so a ClusterRole and a ClusterRoleBinding is required. + ingressClassResource: + # -- Name of the IngressClass + name: nginx + # -- Create the IngressClass or not + enabled: true + # -- If true, Ingresses without `ingressClassName` get assigned to this IngressClass on creation. + # Ingress creation gets rejected if there are multiple default IngressClasses. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class + default: false + # -- Controller of the IngressClass. An Ingress Controller looks for IngressClasses it should reconcile by this value. + # This value is also being set as the `--controller-class` argument of this Ingress Controller. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class + controllerValue: k8s.io/ingress-nginx + # -- A link to a custom resource containing additional configuration for the controller. + # This is optional if the controller consuming this IngressClass does not require additional parameters. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class + parameters: {} + # parameters: + # apiGroup: k8s.example.com + # kind: IngressParameters + # name: external-lb + # -- For backwards compatibility with ingress.class annotation, use ingressClass. + # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security context for controller pods + podSecurityContext: {} + # -- sysctls for controller pods + ## Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + # -- Security context for controller containers + containerSecurityContext: {} + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disable, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. + publishService: + # -- Enable 'publishService' or not enabled: true - ingress_exporter: - scrape: true + # -- Allows overriding of the publish service to bind to + # Must be / + pathOverride: "" + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels + # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. + namespaceSelector: "" + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + # -- Additional command line arguments to pass to Ingress-Nginx Controller + # E.g. 
to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "/" + ## time-buckets: "0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10" + ## length-buckets: "10,20,30,40,50,60,70,80,90,100" + ## size-buckets: "10,100,1000,10000,100000,1e+06,1e+07" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # topologyKey: topology.kubernetes.io/zone + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . 
}}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # topologyKey: kubernetes.io/hostname + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + kubernetes.io/os: linux + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + replicaCount: 1 + # -- Minimum available pods set in PodDisruptionBudget. + # Define either 'minAvailable' or 'maxUnavailable', never both. + minAvailable: 1 + # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored. + # maxUnavailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + annotations: {} + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + # fallback: + # failureThreshold: 3 + # replicas: 11 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + service: + # -- Enable controller services or not. This does not influence the creation of either the admission webhook or the metrics service. + enabled: true + external: + # -- Enable the external controller service or not. Useful for internal-only deployments. + enabled: true + # -- Annotations to be added to the external controller service. See `controller.service.internal.annotations` for annotations to be added to the internal controller service. + annotations: + metallb.universe.tf/address-pool: openstack-external + # -- Labels to be added to both controller services. + labels: {} + # -- Type of the external controller service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: LoadBalancer + # -- Pre-defined cluster internal IP address of the external controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- List of node IP addresses at which the external controller service is available. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the external controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the external controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the external controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the external controller service or not. Applies to type `LoadBalancer` only. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the external controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the external controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the external controller service. + # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 -endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: + # -- Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the external controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + # -- Enable the HTTP listener on both controller services or not. + enableHttp: true + # -- Enable the HTTPS listener on both controller services or not. + enableHttps: true + ports: + # -- Port the external HTTP listener is published with. + http: 80 + # -- Port the external HTTPS listener is published with. + https: 443 + targetPorts: + # -- Port of the ingress controller the external HTTP listener is mapped to. + http: http + # -- Port of the ingress controller the external HTTPS listener is mapped to. 
+ https: https + # -- Declare the app protocol of the external HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the external HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the external HTTPS listener. If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for external TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for external UDP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # udp: + # 53: 30053 + udp: {} + internal: + # -- Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this. enabled: false - ingress: - username: ingress - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - ingress: - hosts: - default: ingress - error_pages: ingress-error-pages - host_fqdn_override: - default: null - # NOTE: The values under .endpoints.ingress.host_fqdn_override.public.tls - # will be used for the default SSL certificate. - # See also the .conf.default_ssl_certificate options below. - public: - tls: - crt: "" - key: "" - port: - http: - default: 80 - https: - default: 443 - healthz: - default: 10254 - status: - default: 10246 - stream: - default: 10247 - profiler: - default: 10245 - server: - default: 8181 - ingress_exporter: - namespace: null - hosts: - default: ingress-exporter - host_fqdn_override: - default: null - path: - default: null - scheme: - default: 'http' - port: - metrics: - default: 10254 - kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns - host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns_tcp: - default: 53 - dns: - default: 53 - protocol: UDP - -network_policy: - ingress: - ingress: - - {} - egress: - - {} - -secrets: - oci_image_registry: - ingress: ingress-oci-image-registry-key - tls: - ingress: - api: - # .secrets.tls.ingress.api.public="name of the TLS secret to create for the default cert" - # NOTE: The contents of the secret are from .endpoints.ingress.host_fqdn_override.public.tls - public: default-tls-public - dhparam: - secret_dhparam: | -conf: - ingress: - enable-underscores-in-headers: "true" - # NOTE(portdirect): if left blank this is populated from - # .network.vip.addr when running in host networking - # and .network.vip.manage=true, otherwise it is left as - # an empty string (the default). - bind-address: null - enable-vts-status: "true" - server-tokens: "false" - ssl-dh-param: openstack/secret-dhparam - worker-processes: "4" - # This block sets the --default-ssl-certificate option - # https://kubernetes.github.io/ingress-nginx/user-guide/tls/#default-ssl-certificate - default_ssl_certificate: - # .conf.default_ssl_certificate.enabled=true: use a default certificate + # -- Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + annotations: {} + # -- Type of the internal controller service. + # Defaults to the value of `controller.service.type`. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: "" + # -- Pre-defined cluster internal IP address of the internal controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- List of node IP addresses at which the internal controller service is available. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the internal controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the internal controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the internal controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the internal controller service or not. Applies to type `LoadBalancer` only. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the internal controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the internal controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the internal controller service. + # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the internal controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + ports: {} + # -- Port the internal HTTP listener is published with. + # Defaults to the value of `controller.service.ports.http`. 
+ # http: 80 + # -- Port the internal HTTPS listener is published with. + # Defaults to the value of `controller.service.ports.https`. + # https: 443 + + targetPorts: {} + # -- Port of the ingress controller the internal HTTP listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.http`. + # http: http + # -- Port of the ingress controller the internal HTTPS listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.https`. + # https: https + + # -- Declare the app protocol of the internal HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the internal HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the internal HTTPS listener. If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for internal TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for internal UDP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # udp: + # 53: 30053 + udp: {} + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + # -- Modules, which are mounted into the core nginx image. 
See values.yaml for a sample to add opentelemetry module + extraModules: [] + # - name: mytestmodule + # image: + # registry: registry.k8s.io + # image: ingress-nginx/mytestmodule + # ## for backwards compatibility consider setting the full image url via the repository value below + # ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + # ## repository: + # tag: "v1.0.0" + # digest: "" + # distroless: false + # containerSecurityContext: + # runAsNonRoot: true + # runAsUser: + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # resources: {} + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + opentelemetry: + enabled: false + name: opentelemetry + image: + registry: registry.k8s.io + image: ingress-nginx/opentelemetry + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v20230721-3e2062ee5" + digest: sha256:13bee3f5223883d3ca62fee7309ad02d22ec00ff0d7033e3e9aca7a9f60fd472 + distroless: true + containerSecurityContext: + runAsNonRoot: true + # -- The image's default user, inherited from its base image `cgr.dev/chainguard/static`. + runAsUser: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + admissionWebhooks: + name: admission + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. 
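The `opentelemetry` block above only mounts the module; tracing still has to be switched on through the controller ConfigMap. A hedged sketch of enabling it, where the collector host and port are placeholders for whatever OTLP endpoint runs in your cluster:

``` yaml
controller:
  opentelemetry:
    enabled: true  # mounts the module via an init container
  config:
    enable-opentelemetry: "true"
    otlp-collector-host: "otel-collector.observability.svc"  # placeholder collector address
    otlp-collector-port: "4317"                              # placeholder OTLP gRPC port
```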
+ enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + # -- Use an existing PSP instead of creating one + existingPsp: "" + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + createSecretJob: + name: create + # -- Security context for secret creation containers + securityContext: + runAsNonRoot: true + runAsUser: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + patchWebhookJob: + name: patch + # -- Security context for webhook patch containers + securityContext: + runAsNonRoot: true + runAsUser: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.4.1 + digest: sha256:36d05b4077fb8e3d13663702fa337f124675ba8667cbd949c03a8e8ea6fa4366 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + # NetworkPolicy for webhook patch + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + # -- Security context for secret creation & webhook patch pods + securityContext: {} + # Use certmanager to generate webhook certs + certManager: + enabled: false + # self-signed root certificate + rootCert: + # default to be 5y + duration: "" + admissionCert: + # default to be 1y + duration: "" + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + metrics: + port: 10254 + portName: metrics + # if this port is changed, change healthz-port: in extraArgs: accordingly enabled: false - # If referencing an existing TLS secret with the default cert - # .conf.default_ssl_certificate.name="name of the secret" - # (defaults to value of .secrets.tls.ingress.api.public) - # .conf.default_ssl_certificate.namespace="namespace of the secret" - # (optional, defaults to release namespace) + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + # -- Labels to be added to the metrics service resource + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + serviceMonitor: + enabled: false + 
additionalLabels: {} + annotations: {} + ## The label to use to retrieve the job name from. + ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace or namespaceOverride only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just example rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # # By default a fake self-signed certificate is generated as default and + # # it is fine if it expires. If `--default-ssl-certificate` flag is used + # # and a valid certificate passed please do not filter for `host` label! + # # (i.e. delete `{host!="_"}` so also the default SSL certificate is + # # checked for expiration) + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!="_"}) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less than a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX; this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX; this requires your attention + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing connections to drain for up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the ConfigMap option + # worker-shutdown-timeout now defaults to 240s instead of 10s.
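Both `metrics` and its `serviceMonitor` ship disabled above, so nothing is scraped out of the box. A minimal override sketch, assuming a Prometheus Operator whose ServiceMonitor selector matches a `release: kube-prometheus-stack` label (that label value is a placeholder):

``` yaml
controller:
  metrics:
    enabled: true          # exposes the port-10254 metrics service
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: kube-prometheus-stack  # placeholder; must match your Prometheus selector
```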
+ ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + priorityClassName: "" +# -- Rollback limit +## +revisionHistoryLimit: 10 +## Default 404 backend +## +defaultBackend: + ## + enabled: false + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + runAsNonRoot: true + # nobody user -> uid 65534 + runAsUser: 65534 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + readOnlyRootFilesystem: true + # -- Use an existing PSP instead of creating one + existingPsp: "" + extraArgs: {} + serviceAccount: + create: true name: "" - namespace: "" - # NOTE: To create a new secret to hold the default certificate, leave the - # above values empty, and specify: - # .endpoints.ingress.host_fqdn_override.public.tls.crt="PEM cert data" - # .endpoints.ingress.host_fqdn_override.public.tls.key="PEM key data" - # .manifests.secret_ingress_tls=true - services: - tcp: null - udp: null - -manifests: - configmap_bin: true - configmap_conf: true - configmap_services_tcp: true - configmap_services_udp: true - deployment_error: true - deployment_ingress: true - endpoints_ingress: true - ingress: true - ingressClass: true - secret_ingress_tls: false - secret_dhparam: false - service_error: true - service_ingress: true - job_image_repo_sync: true - monitoring: - prometheus: - service_exporter: true - network_policy: false - secret_registry: true + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + port: 8080 + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + # -- Security context for default backend pods + podSecurityContext: {} + # -- Security context for default backend containers + containerSecurityContext: {} + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + kubernetes.io/os: linux + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + replicaCount: 1 + minAvailable: 1 + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. 
+ # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraConfigMaps: [] + ## Additional configmaps to the default backend pod. + # - name: my-extra-configmap-1 + # labels: + # type: config-1 + # data: + # extra_file_1.html: | + # <!-- Extra file 1 --> + # - name: my-extra-configmap-2 + # labels: + # type: config-2 + # data: + # extra_file_2.html: | + # <!-- Extra file 2 --> + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + # NetworkPolicy for default backend component. + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + service: + annotations: {} + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# "8080": "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# "53": "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP port names in ingress controller service +## Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" +# -- (string) A base64-encoded Diffie-Hellman parameter.
+# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: "" diff --git a/kustomize/ingress/grafana/helm/ingress-helm-overrides.yaml b/kustomize/ingress/grafana/helm/ingress-helm-overrides.yaml deleted file mode 100644 index 567406c1..00000000 --- a/kustomize/ingress/grafana/helm/ingress-helm-overrides.yaml +++ /dev/null @@ -1,350 +0,0 @@ -deployment: - mode: cluster - type: Deployment - cluster: - class: "nginx-grafana" - ingressClassByName: false - controllerClass: "k8s.io/nginx-ingress" - -images: - tags: - entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - ingress: registry.k8s.io/ingress-nginx/controller:v1.10.1 - ingress_module_init: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy" - ingress_routed_vip: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy" - error_pages: registry.k8s.io/defaultbackend:1.4 - keepalived: docker.io/osixia/keepalived:1.4.5 - dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - image_repo_sync: docker.io/library/docker:17.07.0 - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -pod: - security_context: - error_pages: - pod: - runAsUser: 65534 - container: - ingress_error_pages: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - server: - pod: - runAsUser: 65534 - container: - ingress_vip_kernel_modules: - capabilities: - add: - - SYS_MODULE - readOnlyRootFilesystem: true - runAsUser: 0 - ingress_vip_init: - capabilities: - add: - - NET_ADMIN - readOnlyRootFilesystem: true - runAsUser: 0 - ingress: - readOnlyRootFilesystem: false - runAsUser: 101 - ingress_vip: - capabilities: - add: - - NET_ADMIN - readOnlyRootFilesystem: true - runAsUser: 0 - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: openstack-control-plane - operator: In - values: - - enabled - tolerations: - ingress: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - dns_policy: "ClusterFirstWithHostNet" - replicas: - ingress: 3 - error_page: 2 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - termination_grace_period: - server: - timeout: 60 - error_pages: - timeout: 60 - resources: - enabled: false - ingress: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - error_pages: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - jobs: - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -labels: - server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - error_server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -network: - host_namespace: false - vip: - manage: false - # what type of vip manage machanism will be used - # possible options: routed, keepalived - mode: routed - interface: ingress-vip - addr: 172.18.0.1/32 - keepalived_router_id: 100 - # Use .network.vip.addr as an external IP for the service - # Useful if the CNI or provider can set up 
routes, etc. - assign_as_external_ip: false - ingressClass: - spec: - controller: null - ingress: - spec: - ingressClassName: null - node_port: - enabled: false - http_port: 30080 - https_port: 30443 - annotations: - # NOTE(portdirect): if left blank this is populated from - # .deployment.cluster.class - nginx.ingress.kubernetes.io/proxy-body-size: "0" - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_headers "X-Content-Type-Options: nosniff"; - more_set_headers "X-Frame-Options: deny"; - more_set_headers "X-Permitted-Cross-Domain-Policies: none"; - more_set_headers "Content-Security-Policy: script-src 'self'"; - external_policy_local: false - -dependencies: - dynamic: - common: - local_image_registry: - jobs: - - ingress-image-repo-sync - services: - - endpoint: node - service: local_image_registry - static: - error_pages: - jobs: null - ingress: - jobs: null - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - -monitoring: - prometheus: - enabled: true - ingress_exporter: - scrape: true - port: 10254 - -endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - ingress: - username: ingress - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - ingress: - hosts: - default: ingress - error_pages: ingress-error-pages - host_fqdn_override: - default: null - # NOTE: The values under .endpoints.ingress.host_fqdn_override.public.tls - # will be used for the default SSL certificate. - # See also the .conf.default_ssl_certificate options below. - public: - tls: - crt: "" - key: "" - port: - http: - default: 80 - https: - default: 443 - healthz: - default: 10254 - status: - default: 10246 - stream: - default: 10247 - profiler: - default: 10245 - server: - default: 8181 - ingress_exporter: - namespace: null - hosts: - default: ingress-exporter - host_fqdn_override: - default: null - path: - default: null - scheme: - default: 'http' - port: - metrics: - default: 10254 - kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns - host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns_tcp: - default: 53 - dns: - default: 53 - protocol: UDP - -network_policy: - ingress: - ingress: - - {} - egress: - - {} - -secrets: - oci_image_registry: - ingress: ingress-oci-image-registry-key - tls: - ingress: - api: - # .secrets.tls.ingress.api.public="name of the TLS secret to create for the default cert" - # NOTE: The contents of the secret are from .endpoints.ingress.host_fqdn_override.public.tls - public: default-tls-public - dhparam: - secret_dhparam: | -conf: - ingress: - enable-underscores-in-headers: "true" - # NOTE(portdirect): if left blank this is populated from - # .network.vip.addr when running in host networking - # and .network.vip.manage=true, otherwise it is left as - # an empty string (the default). 
- bind-address: null - enable-vts-status: "true" - server-tokens: "false" - ssl-dh-param: openstack/secret-dhparam - worker-processes: "4" - # This block sets the --default-ssl-certificate option - # https://kubernetes.github.io/ingress-nginx/user-guide/tls/#default-ssl-certificate - default_ssl_certificate: - # .conf.default_ssl_certificate.enabled=true: use a default certificate - enabled: false - # If referencing an existing TLS secret with the default cert - # .conf.default_ssl_certificate.name="name of the secret" - # (defaults to value of .secrets.tls.ingress.api.public) - # .conf.default_ssl_certificate.namespace="namespace of the secret" - # (optional, defaults to release namespace) - name: "" - namespace: "" - # NOTE: To create a new secret to hold the default certificate, leave the - # above values empty, and specify: - # .endpoints.ingress.host_fqdn_override.public.tls.crt="PEM cert data" - # .endpoints.ingress.host_fqdn_override.public.tls.key="PEM key data" - # .manifests.secret_ingress_tls=true - services: - tcp: null - udp: null - -manifests: - configmap_bin: true - configmap_conf: true - configmap_services_tcp: true - configmap_services_udp: true - deployment_error: true - deployment_ingress: true - endpoints_ingress: true - ingress: true - ingressClass: true - secret_ingress_tls: false - secret_dhparam: false - service_error: true - service_ingress: true - job_image_repo_sync: true - monitoring: - prometheus: - service_exporter: true - network_policy: false - secret_registry: true diff --git a/kustomize/ingress/grafana/kustomization.yaml b/kustomize/ingress/grafana/kustomization.yaml deleted file mode 100644 index e571c69a..00000000 --- a/kustomize/ingress/grafana/kustomization.yaml +++ /dev/null @@ -1,11 +0,0 @@ -helmCharts: - - name: ingress-nginx - releaseName: ingress-grafana-internal - repo: https://kubernetes.github.io/ingress-nginx - valuesInline: - deployment: - cluster: - class: nginx - valuesFile: helm/ingress-helm-overrides.yaml - namespace: grafana - includeCRDs: true diff --git a/kustomize/ingress/internal/helm/ingress-helm-overrides.yaml b/kustomize/ingress/internal/helm/ingress-helm-overrides.yaml index 94c94b41..567dbaa1 100644 --- a/kustomize/ingress/internal/helm/ingress-helm-overrides.yaml +++ b/kustomize/ingress/internal/helm/ingress-helm-overrides.yaml @@ -1,350 +1,1103 @@ -deployment: - mode: cluster - type: Deployment - cluster: - class: "nginx-openstack" - ingressClassByName: false - controllerClass: "k8s.io/nginx-ingress" - -images: - tags: - entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - ingress: registry.k8s.io/ingress-nginx/controller:v1.10.1 - ingress_module_init: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy" - ingress_routed_vip: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy" - error_pages: registry.k8s.io/defaultbackend:1.4 - keepalived: docker.io/osixia/keepalived:1.4.5 - dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - image_repo_sync: docker.io/library/docker:17.07.0 - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -pod: - security_context: - error_pages: - pod: - runAsUser: 65534 - container: - ingress_error_pages: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - server: - pod: - runAsUser: 65534 - container: - ingress_vip_kernel_modules: - capabilities: - add: - - SYS_MODULE - readOnlyRootFilesystem: true - runAsUser: 0 - ingress_vip_init: - capabilities: - add: - - NET_ADMIN - readOnlyRootFilesystem: true - runAsUser: 
0 - ingress: - readOnlyRootFilesystem: false - runAsUser: 101 - ingress_vip: - capabilities: - add: - - NET_ADMIN - readOnlyRootFilesystem: true - runAsUser: 0 - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: openstack-control-plane - operator: In - values: - - enabled - tolerations: - ingress: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - dns_policy: "ClusterFirstWithHostNet" - replicas: - ingress: 3 - error_page: 2 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - termination_grace_period: - server: - timeout: 60 - error_pages: - timeout: 60 - resources: +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +# -- Override the deployment namespace; defaults to .Release.Namespace +namespaceOverride: "" +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + enableAnnotationValidations: false + image: + ## Keep false as default for now! + chroot: false + registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.10.1" + digest: sha256:e24f39d3eed6bcc239a56f20098878845f62baa34b9f2be2fd2c38ce9fb0f29e + digestChroot: sha256:c155954116b397163c88afcb3252462771bd7867017e8a17623e83601bab7ac7 + pullPolicy: IfNotPresent + runAsNonRoot: true + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + readOnlyRootFilesystem: false + # -- Use an existing PSP instead of creating one + existingPsp: "" + # -- Configures the controller container name + containerName: controller + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + # -- Optionally customize the pod hostAliases. + hostAliases: [] + # - ip: 127.0.0.1 + # hostnames: + # - foo.local + # - bar.local + # - ip: 10.1.2.3 + # hostnames: + # - foo.remote + # - bar.remote + # -- Optionally customize the pod hostname. 
+ hostname: {} + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network; the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + # -- Process IngressClass per name (additionally as per spec.controller). + ingressClassByName: false + # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-mode="auto" + # Defaults to false + enableTopologyAwareRouting: false + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: false + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not enabled: false - ingress: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - error_pages: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - jobs: - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -labels: - server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - error_server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -network: - host_namespace: false - vip: - manage: false - # what type of vip manage machanism will be used - # possible options: routed, keepalived - mode: routed - interface: ingress-vip - addr: 172.18.0.1/32 - keepalived_router_id: 100 - # Use .network.vip.addr as an external IP for the service - # Useful if the CNI or provider can set up routes, etc.
- assign_as_external_ip: false - ingressClass: - spec: - controller: null - ingress: - spec: - ingressClassName: null - node_port: - enabled: false - http_port: 30080 - https_port: 30443 - annotations: - # NOTE(portdirect): if left blank this is populated from - # .deployment.cluster.class - nginx.ingress.kubernetes.io/proxy-body-size: "0" - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_headers "X-Content-Type-Options: nosniff"; - more_set_headers "X-Frame-Options: deny"; - more_set_headers "X-Permitted-Cross-Domain-Policies: none"; - more_set_headers "Content-Security-Policy: script-src 'self'"; - external_policy_local: false - -dependencies: - dynamic: - common: - local_image_registry: - jobs: - - ingress-image-repo-sync - services: - - endpoint: node - service: local_image_registry - static: - error_pages: - jobs: null - ingress: - jobs: null - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - -monitoring: - prometheus: + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + # NetworkPolicy for controller component. + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader' + electionID: "" + # -- This section refers to the creation of the IngressClass resource. + # IngressClasses are immutable and cannot be changed after creation. + # We do not support namespaced IngressClasses, yet, so a ClusterRole and a ClusterRoleBinding are required. + ingressClassResource: + # -- Name of the IngressClass + name: nginx-openstack + # -- Create the IngressClass or not enabled: true + # -- If true, Ingresses without `ingressClassName` get assigned to this IngressClass on creation. + # Ingress creation gets rejected if there are multiple default IngressClasses. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class + default: false + # -- Controller of the IngressClass. An Ingress Controller looks for IngressClasses it should reconcile by this value. + # This value is also being set as the `--controller-class` argument of this Ingress Controller. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class + controllerValue: k8s.io/ingress-nginx-openstack + # -- A link to a custom resource containing additional configuration for the controller. + # This is optional if the controller consuming this IngressClass does not require additional parameters. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class + parameters: {} + # parameters: + # apiGroup: k8s.example.com + # kind: IngressParameters + # name: external-lb + # -- For backwards compatibility with ingress.class annotation, use ingressClass.
+ # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx-openstack + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security context for controller pods + podSecurityContext: {} + # -- sysctls for controller pods + ## Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + # -- Security context for controller containers + containerSecurityContext: {} + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disabled, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. + publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be <namespace>/<name> + pathOverride: "" + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watch namespaces whose labels + # match the namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. + namespaceSelector: "" + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + # -- Additional command line arguments to pass to Ingress-Nginx Controller + # E.g.
to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "<namespace>/<secret_name>" + ## time-buckets: "0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10" + ## length-buckets: "10,20,30,40,50,60,70,80,90,100" + ## size-buckets: "10,100,1000,10000,100000,1e+06,1e+07" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: Deployment + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # topologyKey: topology.kubernetes.io/zone + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . 
}}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # topologyKey: kubernetes.io/hostname + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + kubernetes.io/os: linux + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + replicaCount: 1 + # -- Minimum available pods set in PodDisruptionBudget. + # Define either 'minAvailable' or 'maxUnavailable', never both. + minAvailable: 1 + # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored. + # maxUnavailable: 1 -endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + annotations: {} + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + # fallback: + # failureThreshold: 3 + # replicas: 11 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://<prometheus-host>:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + service: + # -- Enable controller services or not. This does not influence the creation of either the admission webhook or the metrics service. + enabled: true + external: + # -- Enable the external controller service or not. Useful for internal-only deployments. + enabled: true + # -- Annotations to be added to the external controller service. See `controller.service.internal.annotations` for annotations to be added to the internal controller service. + annotations: {} + # -- Labels to be added to both controller services. + labels: {} + # -- Type of the external controller service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: LoadBalancer + # -- Pre-defined cluster internal IP address of the external controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- List of node IP addresses at which the external controller service is available. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the external controller service.
Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the external controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the external controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the external controller service or not. Applies to type `LoadBalancer` only. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the external controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the external controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the external controller service. + # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the external controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + # -- Enable the HTTP listener on both controller services or not. + enableHttp: true + # -- Enable the HTTPS listener on both controller services or not. + enableHttps: true + ports: + # -- Port the external HTTP listener is published with. + http: 80 + # -- Port the external HTTPS listener is published with. + https: 443 + targetPorts: + # -- Port of the ingress controller the external HTTP listener is mapped to. + http: http + # -- Port of the ingress controller the external HTTPS listener is mapped to. + https: https + # -- Declare the app protocol of the external HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the external HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the external HTTPS listener. 
If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for external TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for external UDP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # udp: + # 53: 30053 + udp: {} + internal: + # -- Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this. enabled: false - ingress: - username: ingress - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - ingress: - hosts: - default: ingress - error_pages: ingress-error-pages - host_fqdn_override: - default: null - # NOTE: The values under .endpoints.ingress.host_fqdn_override.public.tls - # will be used for the default SSL certificate. - # See also the .conf.default_ssl_certificate options below. - public: - tls: - crt: "" - key: "" - port: - http: - default: 80 - https: - default: 443 - healthz: - default: 10254 - status: - default: 10246 - stream: - default: 10247 - profiler: - default: 10245 - server: - default: 8181 - ingress_exporter: - namespace: null - hosts: - default: ingress-exporter - host_fqdn_override: - default: null - path: - default: null - scheme: - default: 'http' - port: - metrics: - default: 10254 - kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns - host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns_tcp: - default: 53 - dns: - default: 53 - protocol: UDP - -network_policy: - ingress: - ingress: - - {} - egress: - - {} - -secrets: - oci_image_registry: - ingress: ingress-oci-image-registry-key - tls: - ingress: - api: - # .secrets.tls.ingress.api.public="name of the TLS secret to create for the default cert" - # NOTE: The contents of the secret are from .endpoints.ingress.host_fqdn_override.public.tls - public: default-tls-public - dhparam: - secret_dhparam: | -conf: - ingress: - enable-underscores-in-headers: "true" - # NOTE(portdirect): if left blank this is populated from - # .network.vip.addr when running in host networking - # and .network.vip.manage=true, otherwise it is left as - # an empty string (the default). - bind-address: null - enable-vts-status: "true" - server-tokens: "false" - ssl-dh-param: openstack/secret-dhparam - worker-processes: "4" - # This block sets the --default-ssl-certificate option - # https://kubernetes.github.io/ingress-nginx/user-guide/tls/#default-ssl-certificate - default_ssl_certificate: - # .conf.default_ssl_certificate.enabled=true: use a default certificate + # -- Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + annotations: {} + # -- Type of the internal controller service. + # Defaults to the value of `controller.service.type`. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: "" + # -- Pre-defined cluster internal IP address of the internal controller service. Take care of collisions with existing services. + # This value is immutable. 
Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- List of node IP addresses at which the internal controller service is available. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the internal controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the internal controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the internal controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the internal controller service or not. Applies to type `LoadBalancer` only. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the internal controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the internal controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the internal controller service. + # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the internal controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + ports: {} + # -- Port the internal HTTP listener is published with. + # Defaults to the value of `controller.service.ports.http`. + # http: 80 + # -- Port the internal HTTPS listener is published with. + # Defaults to the value of `controller.service.ports.https`. + # https: 443 + + targetPorts: {} + # -- Port of the ingress controller the internal HTTP listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.http`. + # http: http + # -- Port of the ingress controller the internal HTTPS listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.https`. 
+ # https: https + + # -- Declare the app protocol of the internal HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the internal HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the internal HTTPS listener. If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for internal TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for internal UDP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # udp: + # 53: 30053 + udp: {} + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + # -- Modules, which are mounted into the core nginx image. See values.yaml for a sample to add opentelemetry module + extraModules: [] + # - name: mytestmodule + # image: + # registry: registry.k8s.io + # image: ingress-nginx/mytestmodule + # ## for backwards compatibility consider setting the full image url via the repository value below + # ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + # ## repository: + # tag: "v1.0.0" + # digest: "" + # distroless: false + # containerSecurityContext: + # runAsNonRoot: true + # runAsUser: + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # resources: {} + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. 
+
+  opentelemetry:
     enabled: false
-    # If referencing an existing TLS secret with the default cert
-    # .conf.default_ssl_certificate.name="name of the secret"
-    # (defaults to value of .secrets.tls.ingress.api.public)
-    # .conf.default_ssl_certificate.namespace="namespace of the secret"
-    # (optional, defaults to release namespace)
+    name: opentelemetry
+    image:
+      registry: registry.k8s.io
+      image: ingress-nginx/opentelemetry
+      ## for backwards compatibility consider setting the full image url via the repository value below
+      ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+      ## repository:
+      tag: "v20230721-3e2062ee5"
+      digest: sha256:13bee3f5223883d3ca62fee7309ad02d22ec00ff0d7033e3e9aca7a9f60fd472
+      distroless: true
+    containerSecurityContext:
+      runAsNonRoot: true
+      # -- The image's default user, inherited from its base image `cgr.dev/chainguard/static`.
+      runAsUser: 65532
+      allowPrivilegeEscalation: false
+      seccompProfile:
+        type: RuntimeDefault
+      capabilities:
+        drop:
+          - ALL
+      readOnlyRootFilesystem: true
+    resources: {}
+  admissionWebhooks:
+    name: admission
+    annotations: {}
+    # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem".
+
+    ## Additional annotations to the admission webhooks.
+    ## These annotations will be added to the ValidatingWebhookConfiguration and
+    ## the Jobs Spec of the admission webhooks.
+    enabled: true
+    # -- Additional environment variables to set
+    extraEnvs: []
+    # extraEnvs:
+    #   - name: FOO
+    #     valueFrom:
+    #       secretKeyRef:
+    #         key: FOO
+    #         name: secret-resource
+    # -- Admission Webhook failure policy to use
+    failurePolicy: Fail
+    # timeoutSeconds: 10
+    port: 8443
+    certificate: "/usr/local/certificates/cert"
+    key: "/usr/local/certificates/key"
+    namespaceSelector: {}
+    objectSelector: {}
+    # -- Labels to be added to admission webhooks
+    labels: {}
+    # -- Use an existing PSP instead of creating one
+    existingPsp: ""
+    service:
+      annotations: {}
+      # clusterIP: ""
+      externalIPs: []
+      # loadBalancerIP: ""
+      loadBalancerSourceRanges: []
+      servicePort: 443
+      type: ClusterIP
+    createSecretJob:
+      name: create
+      # -- Security context for secret creation containers
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65532
+        allowPrivilegeEscalation: false
+        seccompProfile:
+          type: RuntimeDefault
+        capabilities:
+          drop:
+            - ALL
+        readOnlyRootFilesystem: true
+      resources: {}
+      # limits:
+      #   cpu: 10m
+      #   memory: 20Mi
+      # requests:
+      #   cpu: 10m
+      #   memory: 20Mi
+    patchWebhookJob:
+      name: patch
+      # -- Security context for webhook patch containers
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65532
+        allowPrivilegeEscalation: false
+        seccompProfile:
+          type: RuntimeDefault
+        capabilities:
+          drop:
+            - ALL
+        readOnlyRootFilesystem: true
+      resources: {}
+    patch:
+      enabled: true
+      image:
+        registry: registry.k8s.io
+        image: ingress-nginx/kube-webhook-certgen
+        ## for backwards compatibility consider setting the full image url via the repository value below
+        ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+        ## repository:
+        tag: v1.4.1
+        digest: sha256:36d05b4077fb8e3d13663702fa337f124675ba8667cbd949c03a8e8ea6fa4366
+        pullPolicy: IfNotPresent
+      # -- Provide a priority class name to the webhook patching job
+      ##
+      priorityClassName: ""
+      podAnnotations: {}
+      # NetworkPolicy for webhook patch
+      networkPolicy:
+        # -- Enable 'networkPolicy' or not
+        enabled: false
+      nodeSelector:
+        kubernetes.io/os: linux
+      tolerations: []
+      # -- Labels to be added to patch job resources
+      labels: {}
+      # -- Security context for secret creation & webhook patch pods
+      securityContext: {}
+    # Use certmanager to generate webhook certs
+    certManager:
+      enabled: false
+      # self-signed root certificate
+      rootCert:
+        # default to be 5y
+        duration: ""
+      admissionCert:
+        # default to be 1y
+        duration: ""
+        # issuerRef:
+        #   name: "issuer"
+        #   kind: "ClusterIssuer"
+  metrics:
+    port: 10254
+    portName: metrics
+    # if this port is changed, change healthz-port: in extraArgs: accordingly
+    enabled: false
+    service:
+      annotations: {}
+      # prometheus.io/scrape: "true"
+      # prometheus.io/port: "10254"
+      # -- Labels to be added to the metrics service resource
+      labels: {}
+      # clusterIP: ""
+
+      # -- List of IP addresses at which the stats-exporter service is available
+      ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+      ##
+      externalIPs: []
+      # loadBalancerIP: ""
+      loadBalancerSourceRanges: []
+      servicePort: 10254
+      type: ClusterIP
+      # externalTrafficPolicy: ""
+      # nodePort: ""
+    serviceMonitor:
+      enabled: false
+      additionalLabels: {}
+      annotations: {}
+      ## The label to use to retrieve the job name from.
+      ## jobLabel: "app.kubernetes.io/name"
+      namespace: ""
+      namespaceSelector: {}
+      ## Default: scrape .Release.Namespace or namespaceOverride only
+      ## To scrape all, use the following:
+      ## namespaceSelector:
+      ##   any: true
+      scrapeInterval: 30s
+      # honorLabels: true
+      targetLabels: []
+      relabelings: []
+      metricRelabelings: []
+    prometheusRule:
+      enabled: false
+      additionalLabels: {}
+      # namespace: ""
+      rules: []
+      # # These are just examples rules, please adapt them to your needs
+      # - alert: NGINXConfigFailed
+      #   expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
+      #   for: 1s
+      #   labels:
+      #     severity: critical
+      #   annotations:
+      #     description: bad ingress config - nginx config test failed
+      #     summary: uninstall the latest ingress changes to allow config reloads to resume
+      # # By default a fake self-signed certificate is generated as default and
+      # # it is fine if it expires. If `--default-ssl-certificate` flag is used
+      # # and a valid certificate passed please do not filter for `host` label!
+      # # (i.e. delete `{host!="_"}` so also the default SSL certificate is
+      # # checked for expiration)
+      # - alert: NGINXCertificateExpiry
+      #   expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!="_"}) by (host) - time()) < 604800
+      #   for: 1s
+      #   labels:
+      #     severity: critical
+      #   annotations:
+      #     description: ssl certificate(s) will expire in less then a week
+      #     summary: renew expiring certificates to avoid downtime
+      # - alert: NGINXTooMany500s
+      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+      #   for: 1m
+      #   labels:
+      #     severity: warning
+      #   annotations:
+      #     description: Too many 5XXs
+      #     summary: More than 5% of all requests returned 5XX, this requires your attention
+      # - alert: NGINXTooMany400s
+      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+      #   for: 1m
+      #   labels:
+      #     severity: warning
+      #   annotations:
+      #     description: Too many 4XXs
+      #     summary: More than 5% of all requests returned 4XX, this requires your attention
+  # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
+  # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
+  # to 300, allowing the draining of connections up to five minutes.
+  # If the active connections end before that, the pod will terminate gracefully at that time.
+  # To effectively take advantage of this feature, the Configmap feature
+  # worker-shutdown-timeout new value is 240s instead of 10s.
+  ##
+  lifecycle:
+    preStop:
+      exec:
+        command:
+          - /wait-shutdown
+  priorityClassName: ""
+# -- Rollback limit
+##
+revisionHistoryLimit: 10
+## Default 404 backend
+##
+defaultBackend:
+  ##
+  enabled: false
+  name: defaultbackend
+  image:
+    registry: registry.k8s.io
+    image: defaultbackend-amd64
+    ## for backwards compatibility consider setting the full image url via the repository value below
+    ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+    ## repository:
+    tag: "1.5"
+    pullPolicy: IfNotPresent
+    runAsNonRoot: true
+    # nobody user -> uid 65534
+    runAsUser: 65534
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    readOnlyRootFilesystem: true
+  # -- Use an existing PSP instead of creating one
+  existingPsp: ""
+  extraArgs: {}
+  serviceAccount:
+    create: true
     name: ""
-    namespace: ""
-    # NOTE: To create a new secret to hold the default certificate, leave the
-    # above values empty, and specify:
-    # .endpoints.ingress.host_fqdn_override.public.tls.crt="PEM cert data"
-    # .endpoints.ingress.host_fqdn_override.public.tls.key="PEM key data"
-    # .manifests.secret_ingress_tls=true
-  services:
-    tcp: null
-    udp: null
-
-manifests:
-  configmap_bin: true
-  configmap_conf: true
-  configmap_services_tcp: true
-  configmap_services_udp: true
-  deployment_error: true
-  deployment_ingress: true
-  endpoints_ingress: true
-  ingress: true
-  ingressClass: true
-  secret_ingress_tls: false
-  secret_dhparam: false
-  service_error: true
-  service_ingress: true
-  job_image_repo_sync: true
-  monitoring:
-    prometheus:
-      service_exporter: true
-  network_policy: false
-  secret_registry: true
+    automountServiceAccountToken: true
+  # -- Additional environment variables to set for defaultBackend pods
+  extraEnvs: []
+  port: 8080
+  ## Readiness and liveness probes for default backend
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  ##
+  livenessProbe:
+    failureThreshold: 3
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    successThreshold: 1
+    timeoutSeconds: 5
+  readinessProbe:
+    failureThreshold: 6
+    initialDelaySeconds: 0
+    periodSeconds: 5
+    successThreshold: 1
+    timeoutSeconds: 5
+  # -- The update strategy to apply to the Deployment or DaemonSet
+  ##
+  updateStrategy: {}
+  # rollingUpdate:
+  #   maxUnavailable: 1
+  # type: RollingUpdate
+
+  # -- `minReadySeconds` to avoid killing pods before we are ready
+  ##
+  minReadySeconds: 0
+  # -- Node tolerations for server scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+  # - key: "key"
+  #   operator: "Equal|Exists"
+  #   value: "value"
+  #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  affinity: {}
+  # -- Security context for default backend pods
+  podSecurityContext: {}
+  # -- Security context for default backend containers
+  containerSecurityContext: {}
+  # -- Labels to add to the pod container metadata
+  podLabels: {}
+  # key: value
+
+  # -- Node labels for default backend pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
+  ##
+  nodeSelector:
+    kubernetes.io/os: linux
+  # -- Annotations to be added to default backend pods
+  ##
+  podAnnotations: {}
+  replicaCount: 1
+  minAvailable: 1
+  resources: {}
+  # limits:
+  #   cpu: 10m
+  #   memory: 20Mi
+  # requests:
+  #   cpu: 10m
+  #   memory: 20Mi
+
+  extraVolumeMounts: []
+  ## Additional volumeMounts to the default backend container.
+  # - name: copy-portal-skins
+  #   mountPath: /var/lib/lemonldap-ng/portal/skins
+
+  extraVolumes: []
+  ## Additional volumes to the default backend pod.
+  # - name: copy-portal-skins
+  #   emptyDir: {}
+
+  extraConfigMaps: []
+  ## Additional configmaps to the default backend pod.
+  # - name: my-extra-configmap-1
+  #   labels:
+  #     type: config-1
+  #   data:
+  #     extra_file_1.html: |
+  #
+  # - name: my-extra-configmap-2
+  #   labels:
+  #     type: config-2
+  #   data:
+  #     extra_file_2.html: |
+  #
+
+  autoscaling:
+    annotations: {}
+    enabled: false
+    minReplicas: 1
+    maxReplicas: 2
+    targetCPUUtilizationPercentage: 50
+    targetMemoryUtilizationPercentage: 50
+  # NetworkPolicy for default backend component.
+  networkPolicy:
+    # -- Enable 'networkPolicy' or not
+    enabled: false
+  service:
+    annotations: {}
+    # clusterIP: ""
+
+    # -- List of IP addresses at which the default backend service is available
+    ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+    ##
+    externalIPs: []
+    # loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    servicePort: 80
+    type: ClusterIP
+  priorityClassName: ""
+  # -- Labels to be added to the default backend resources
+  labels: {}
+## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
+rbac:
+  create: true
+  scope: false
+## If true, create & use Pod Security Policy resources
+## https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+podSecurityPolicy:
+  enabled: false
+serviceAccount:
+  create: true
+  name: ""
+  automountServiceAccountToken: true
+  # -- Annotations for the controller service account
+  annotations: {}
+# -- Optional array of imagePullSecrets containing private registry credentials
+## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# - name: secretName
+
+# -- TCP service key-value pairs
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
+##
+tcp: {}
+# "8080": "default/example-tcp-svc:9000"
+
+# -- UDP service key-value pairs
+## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md
+##
+udp: {}
+# "53": "kube-system/kube-dns:53"
+
+# -- Prefix for TCP and UDP ports names in ingress controller service
+## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration
+portNamePrefix: ""
+# -- (string) A base64-encoded Diffie-Hellman parameter.
+# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64`
+## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param
+dhParam: ""
diff --git a/kustomize/skyline/base/configmap-bin.yaml b/kustomize/skyline/base/configmap-bin.yaml
index a9c0e5ff..d91cb861 100644
--- a/kustomize/skyline/base/configmap-bin.yaml
+++ b/kustomize/skyline/base/configmap-bin.yaml
@@ -9,7 +9,7 @@ data:
     #!/bin/bash
     set -exo pipefail
     cat /etc/skyline/skyline.yaml
-    alembic -c /skyline-apiserver/skyline_apiserver/db/alembic/alembic.ini upgrade head
+    alembic -c /opt/skyline-apiserver/skyline_apiserver/db/alembic/alembic.ini upgrade head
   data-skyline-run.sh: |
     set -exo pipefail
     echo "/usr/local/bin/gunicorn -c /etc/skyline/gunicorn.py skyline_apiserver.main:app" >/run_command
diff --git a/kustomize/skyline/base/deployment-apiserver.yaml b/kustomize/skyline/base/deployment-apiserver.yaml
index afb5a471..ff822568 100644
--- a/kustomize/skyline/base/deployment-apiserver.yaml
+++ b/kustomize/skyline/base/deployment-apiserver.yaml
@@ -317,7 +317,7 @@ spec:
                 key: prometheus_endpoint
                 optional: true
         - name: skyline-apiserver-db-migrate
-          image: "docker.io/99cloud/skyline:2023.1"
+          image: "ghcr.io/rackerlabs/skyline-rxt:master-ubuntu_jammy-1718033880"
           imagePullPolicy: IfNotPresent
           resources:
             requests:
@@ -340,7 +340,7 @@ spec:
              readOnly: true
       containers:
         - name: skyline-apiserver
-          image: "docker.io/99cloud/skyline:latest"
+          image: "ghcr.io/rackerlabs/skyline-rxt:master-ubuntu_jammy-1718033880"
          imagePullPolicy: IfNotPresent
          resources:
            limits:
diff --git a/kustomize/topolvm/general/kustomization.yaml b/kustomize/topolvm/general/kustomization.yaml
index bd78ebb8..502a5887 100644
--- a/kustomize/topolvm/general/kustomization.yaml
+++ b/kustomize/topolvm/general/kustomization.yaml
@@ -5,9 +5,6 @@ helmCharts:
 - name: topolvm
   releaseName: topolvm
   valuesInline:
-    node:
-      nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
     controller:
       replicaCount: 1
       nodeSelector:
@@ -38,8 +35,6 @@ helmCharts:
       mountOptions: []
     # lvmd service
     lvmd:
-      nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
      # lvmd.managed -- If true, set up lvmd service with DaemonSet.
      managed: true
      # lvmd.socketName -- Specify socketName.
diff --git a/mkdocs.yml b/mkdocs.yml
index 3f6942ef..3227ada6 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -130,11 +130,11 @@ nav:
   - Overview:
       - Architecture: genestack-architecture.md
       - Components: genestack-components.md
-  - Quickstart:
-      - Building Virtual Environments: build-test-envs.md
-      - Simple Setup: quickstart.md
   - Deployment Guide:
       - What is Genestack?: deployment-guide-welcome.md
+      - Getting Started:
+          - Building Virtual Environments: build-test-envs.md
+          - Getting the code: genestack-getting-started.md
       - Open Infrastructure:
           - Kubernetes:
               - k8s-overview.md
@@ -186,9 +186,9 @@ nav:
           - skyline: openstack-skyline.md
           - Octavia: openstack-octavia.md
           - Metering:
-               - PostgreSQL: infrastructure-postgresql.md
-               - Gnocchi: openstack-gnocchi.md
-               - Ceilometer: openstack-ceilometer.md
+              - PostgreSQL: infrastructure-postgresql.md
+              - Gnocchi: openstack-gnocchi.md
+              - Ceilometer: openstack-ceilometer.md
           - Monitoring:
               - Monitoring Overview: prometheus-monitoring-overview.md
               - Getting Started: monitoring-getting-started.md
@@ -196,6 +196,7 @@ nav:
               - Kube-OVN Monitoring: prometheus-kube-ovn.md
               - RabbitMQ Exporter: prometheus-rabbitmq-exporter.md
               - Memcached Exporter: prometheus-memcached-exporter.md
+              - MariaDB Exporter: prometheus-mysql-exporter.md
               - Postgres Exporter: prometheus-postgres-exporter.md
               - Openstack Exporter: prometheus-openstack-metrics-exporter.md
               - Blackbox Exporter: prometheus-blackbox-exporter.md