diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..e9a948a0dc
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @stackhpc/kayobe
diff --git a/.github/workflows/tag-and-release.yml b/.github/workflows/tag-and-release.yml
new file mode 100644
index 0000000000..6495be7212
--- /dev/null
+++ b/.github/workflows/tag-and-release.yml
@@ -0,0 +1,12 @@
+---
+name: Tag & Release
+'on':
+ push:
+ branches:
+ - stackhpc/2023.1
+permissions:
+ actions: read
+ contents: write
+jobs:
+ tag-and-release:
+ uses: stackhpc/.github/.github/workflows/tag-and-release.yml@main
diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml
new file mode 100644
index 0000000000..8713f0e02d
--- /dev/null
+++ b/.github/workflows/tox.yml
@@ -0,0 +1,7 @@
+---
+name: Tox Continuous Integration
+'on':
+ pull_request:
+jobs:
+ tox:
+ uses: stackhpc/.github/.github/workflows/tox.yml@main
diff --git a/ansible/filter_plugins/address.py b/ansible/filter_plugins/address.py
index 3757ee8f0e..44ddfa830c 100644
--- a/ansible/filter_plugins/address.py
+++ b/ansible/filter_plugins/address.py
@@ -15,6 +15,7 @@
# limitations under the License.
from kolla_ansible.kolla_address import kolla_address
+from kolla_ansible.kolla_url import kolla_url
from kolla_ansible.put_address_in_context import put_address_in_context
@@ -24,5 +25,6 @@ class FilterModule(object):
def filters(self):
return {
'kolla_address': kolla_address,
+ 'kolla_url': kolla_url,
'put_address_in_context': put_address_in_context,
}
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 025f074649..dc4988406d 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -202,7 +202,8 @@ keepalived_virtual_router_id: "51"
########################
opensearch_datadir_volume: "opensearch"
-opensearch_internal_endpoint: "{{ internal_protocol }}://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}"
+opensearch_internal_endpoint: "{{ opensearch_address | kolla_url(internal_protocol, opensearch_port) }}"
+opensearch_dashboards_external_fqdn: "{{ kolla_external_fqdn }}"
opensearch_dashboards_user: "opensearch"
opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}"
@@ -292,25 +293,39 @@ neutron_ipam_driver: "internal"
aodh_internal_fqdn: "{{ kolla_internal_fqdn }}"
aodh_external_fqdn: "{{ kolla_external_fqdn }}"
aodh_api_port: "8042"
+aodh_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else aodh_api_port }}"
aodh_api_listen_port: "{{ aodh_api_port }}"
barbican_internal_fqdn: "{{ kolla_internal_fqdn }}"
barbican_external_fqdn: "{{ kolla_external_fqdn }}"
barbican_api_port: "9311"
+barbican_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else barbican_api_port }}"
barbican_api_listen_port: "{{ barbican_api_port }}"
+blazar_internal_fqdn: "{{ kolla_internal_fqdn }}"
+blazar_external_fqdn: "{{ kolla_external_fqdn }}"
blazar_api_port: "1234"
+blazar_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else blazar_api_port }}"
+blazar_api_listen_port: "{{ blazar_api_port }}"
+
+caso_tcp_output_port: "24224"
ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}"
ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}"
ceph_rgw_port: "6780"
+ceph_rgw_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ceph_rgw_port }}"
cinder_internal_fqdn: "{{ kolla_internal_fqdn }}"
cinder_external_fqdn: "{{ kolla_external_fqdn }}"
cinder_api_port: "8776"
+cinder_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cinder_api_port }}"
cinder_api_listen_port: "{{ cinder_api_port }}"
+cloudkitty_internal_fqdn: "{{ kolla_internal_fqdn }}"
+cloudkitty_external_fqdn: "{{ kolla_external_fqdn }}"
cloudkitty_api_port: "8889"
+cloudkitty_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cloudkitty_api_port }}"
+cloudkitty_api_listen_port: "{{ cloudkitty_api_port }}"
collectd_udp_port: "25826"
@@ -320,6 +335,7 @@ designate_internal_fqdn: "{{ kolla_internal_fqdn }}"
designate_external_fqdn: "{{ kolla_external_fqdn }}"
designate_api_port: "9001"
designate_api_listen_port: "{{ designate_api_port }}"
+designate_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else designate_api_port }}"
designate_bind_port: "53"
designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}"
designate_rndc_port: "953"
@@ -331,33 +347,48 @@ etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}"
fluentd_syslog_port: "5140"
+freezer_internal_fqdn: "{{ kolla_internal_fqdn }}"
+freezer_external_fqdn: "{{ kolla_external_fqdn }}"
freezer_api_port: "9090"
+freezer_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else freezer_api_port }}"
+freezer_api_listen_port: "{{ freezer_api_port }}"
glance_internal_fqdn: "{{ kolla_internal_fqdn }}"
glance_external_fqdn: "{{ kolla_external_fqdn }}"
glance_api_port: "9292"
glance_api_listen_port: "{{ glance_api_port }}"
+glance_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else glance_api_port }}"
glance_tls_proxy_stats_port: "9293"
gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}"
gnocchi_external_fqdn: "{{ kolla_external_fqdn }}"
gnocchi_api_port: "8041"
gnocchi_api_listen_port: "{{ gnocchi_api_port }}"
+gnocchi_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else gnocchi_api_port }}"
+grafana_internal_fqdn: "{{ kolla_internal_fqdn }}"
+grafana_external_fqdn: "{{ kolla_external_fqdn }}"
grafana_server_port: "3000"
+grafana_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else grafana_server_port }}"
+grafana_server_listen_port: "{{ grafana_server_port }}"
haproxy_stats_port: "1984"
haproxy_monitor_port: "61313"
+haproxy_ssh_port: "2985"
heat_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_external_fqdn: "{{ kolla_external_fqdn }}"
heat_api_port: "8004"
heat_api_listen_port: "{{ heat_api_port }}"
+heat_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_port }}"
heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}"
heat_api_cfn_port: "8000"
heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}"
+heat_api_cfn_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_cfn_port }}"
+horizon_internal_fqdn: "{{ kolla_internal_fqdn }}"
+horizon_external_fqdn: "{{ kolla_external_fqdn }}"
horizon_port: "80"
horizon_tls_port: "443"
horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}"
@@ -368,27 +399,41 @@ ironic_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_api_port: "6385"
ironic_api_listen_port: "{{ ironic_api_port }}"
+ironic_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_api_port }}"
ironic_inspector_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_inspector_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_inspector_port: "5050"
+ironic_inspector_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_inspector_port }}"
ironic_inspector_listen_port: "{{ ironic_inspector_port }}"
ironic_http_port: "8089"
iscsi_port: "3260"
-keystone_public_port: "5000"
-keystone_public_listen_port: "{{ keystone_public_port }}"
+keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}"
+keystone_public_listen_port: "5000"
# NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
# TODO(yoctozepto): Remove after Zed.
keystone_admin_port: "35357"
keystone_admin_listen_port: "{{ keystone_admin_port }}"
+keystone_internal_port: "5000"
+keystone_internal_listen_port: "{{ keystone_internal_port }}"
keystone_ssh_port: "8023"
kuryr_port: "23750"
+letsencrypt_webserver_port: "8081"
+
+magnum_internal_fqdn: "{{ kolla_internal_fqdn }}"
+magnum_external_fqdn: "{{ kolla_external_fqdn }}"
magnum_api_port: "9511"
+magnum_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else magnum_api_port }}"
+magnum_api_listen_port: "{{ magnum_api_port }}"
+manila_internal_fqdn: "{{ kolla_internal_fqdn }}"
+manila_external_fqdn: "{{ kolla_external_fqdn }}"
manila_api_port: "8786"
+manila_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else manila_api_port }}"
+manila_api_listen_port: "{{ manila_api_port }}"
mariadb_port: "{{ database_port }}"
mariadb_wsrep_port: "4567"
@@ -409,48 +454,62 @@ mariadb_shard_root_user_prefix: "root_shard_"
mariadb_shard_backup_user_prefix: "backup_shard_"
mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}"
+masakari_internal_fqdn: "{{ kolla_internal_fqdn }}"
+masakari_external_fqdn: "{{ kolla_external_fqdn }}"
masakari_api_port: "15868"
+masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else masakari_api_port }}"
+masakari_api_listen_port: "{{ masakari_api_port }}"
masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
memcached_port: "11211"
+mistral_internal_fqdn: "{{ kolla_internal_fqdn }}"
+mistral_external_fqdn: "{{ kolla_external_fqdn }}"
mistral_api_port: "8989"
+mistral_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else mistral_api_port }}"
+mistral_api_listen_port: "{{ mistral_api_port }}"
-# TODO(dougszu): Remove in A cycle
-monasca_api_port: "8070"
-monasca_log_api_port: "{{ monasca_api_port }}"
-monasca_agent_forwarder_port: "17123"
-monasca_agent_statsd_port: "8125"
-
+murano_internal_fqdn: "{{ kolla_internal_fqdn }}"
+murano_external_fqdn: "{{ kolla_external_fqdn }}"
murano_api_port: "8082"
+murano_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else murano_api_port }}"
+murano_api_listen_port: "{{ murano_api_port }}"
neutron_internal_fqdn: "{{ kolla_internal_fqdn }}"
neutron_external_fqdn: "{{ kolla_external_fqdn }}"
neutron_server_port: "9696"
neutron_server_listen_port: "{{ neutron_server_port }}"
+neutron_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else neutron_server_port }}"
neutron_tls_proxy_stats_port: "9697"
nova_internal_fqdn: "{{ kolla_internal_fqdn }}"
nova_external_fqdn: "{{ kolla_external_fqdn }}"
nova_api_port: "8774"
nova_api_listen_port: "{{ nova_api_port }}"
+nova_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_api_port }}"
+nova_metadata_internal_fqdn: "{{ kolla_internal_fqdn }}"
+nova_metadata_external_fqdn: "{{ kolla_external_fqdn }}"
nova_metadata_port: "8775"
nova_metadata_listen_port: "{{ nova_metadata_port }}"
nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_novncproxy_port: "6080"
nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}"
+nova_novncproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_novncproxy_port }}"
nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}"
nova_spicehtml5proxy_port: "6082"
nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}"
+nova_spicehtml5proxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_spicehtml5proxy_port }}"
nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_serialproxy_port: "6083"
nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}"
+nova_serialproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_serialproxy_port }}"
nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
octavia_internal_fqdn: "{{ kolla_internal_fqdn }}"
octavia_external_fqdn: "{{ kolla_external_fqdn }}"
octavia_api_port: "9876"
octavia_api_listen_port: "{{ octavia_api_port }}"
+octavia_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else octavia_api_port }}"
octavia_health_manager_port: "5555"
# NOTE: If an external ElasticSearch cluster port is specified,
@@ -458,7 +517,8 @@ octavia_health_manager_port: "5555"
# endpoints. This is for backwards compatibility.
opensearch_port: "{{ elasticsearch_port | default('9200') }}"
opensearch_dashboards_port: "5601"
-opensearch_dashboards_port_external: "{{ opensearch_dashboards_port }}"
+opensearch_dashboards_port_external: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else opensearch_dashboards_port }}"
+opensearch_dashboards_listen_port: "{{ opensearch_dashboards_port }}"
ovn_nb_db_port: "6641"
ovn_sb_db_port: "6642"
@@ -478,8 +538,13 @@ placement_external_fqdn: "{{ kolla_external_fqdn }}"
# Default Placement API port of 8778 already in use
placement_api_port: "8780"
placement_api_listen_port: "{{ placement_api_port }}"
+placement_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else placement_api_port }}"
+prometheus_external_fqdn: "{{ kolla_external_fqdn }}"
+prometheus_internal_fqdn: "{{ kolla_internal_fqdn }}"
prometheus_port: "9091"
+prometheus_listen_port: "{{ prometheus_port }}"
+prometheus_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_port }}"
prometheus_node_exporter_port: "9100"
prometheus_mysqld_exporter_port: "9104"
prometheus_haproxy_exporter_port: "9101"
@@ -492,8 +557,12 @@ prometheus_libvirt_exporter_port: "9177"
prometheus_etcd_integration_port: "{{ etcd_client_port }}"
# Prometheus alertmanager ports
+prometheus_alertmanager_internal_fqdn: "{{ kolla_internal_fqdn }}"
+prometheus_alertmanager_external_fqdn: "{{ kolla_external_fqdn }}"
prometheus_alertmanager_port: "9093"
prometheus_alertmanager_cluster_port: "9094"
+prometheus_alertmanager_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_alertmanager_port }}"
+prometheus_alertmanager_listen_port: "{{ prometheus_alertmanager_port }}"
# Prometheus MSTeams port
prometheus_msteams_port: "9095"
@@ -505,6 +574,9 @@ prometheus_elasticsearch_exporter_port: "9108"
# Prometheus blackbox-exporter ports
prometheus_blackbox_exporter_port: "9115"
+# Prometheus instance label to use for metrics
+prometheus_instance_label:
+
proxysql_admin_port: "6032"
rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}"
@@ -516,22 +588,39 @@ rabbitmq_prometheus_port: "15692"
redis_port: "6379"
redis_sentinel_port: "26379"
+sahara_internal_fqdn: "{{ kolla_internal_fqdn }}"
+sahara_external_fqdn: "{{ kolla_external_fqdn }}"
sahara_api_port: "8386"
+sahara_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else sahara_api_port }}"
+sahara_api_listen_port: "{{ sahara_api_port }}"
senlin_internal_fqdn: "{{ kolla_internal_fqdn }}"
senlin_external_fqdn: "{{ kolla_external_fqdn }}"
senlin_api_port: "8778"
senlin_api_listen_port: "{{ senlin_api_port }}"
+senlin_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else senlin_api_port }}"
-skyline_internal_fqdn: "{{ kolla_internal_fqdn }}"
-skyline_external_fqdn: "{{ kolla_external_fqdn }}"
+skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}"
+skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}"
+skyline_console_internal_fqdn: "{{ kolla_internal_fqdn }}"
+skyline_console_external_fqdn: "{{ kolla_external_fqdn }}"
skyline_apiserver_port: "9998"
skyline_apiserver_listen_port: "{{ skyline_apiserver_port }}"
+skyline_apiserver_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_apiserver_port }}"
skyline_console_port: "9999"
skyline_console_listen_port: "{{ skyline_console_port }}"
+skyline_console_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_console_port }}"
+solum_application_deployment_internal_fqdn: "{{ kolla_internal_fqdn }}"
+solum_application_deployment_external_fqdn: "{{ kolla_external_fqdn }}"
solum_application_deployment_port: "9777"
+solum_application_deployment_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else solum_application_deployment_port }}"
+solum_application_deployment_listen_port: "{{ solum_application_deployment_port }}"
+solum_image_builder_internal_fqdn: "{{ kolla_internal_fqdn }}"
+solum_image_builder_external_fqdn: "{{ kolla_external_fqdn }}"
solum_image_builder_port: "9778"
+solum_image_builder_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else solum_image_builder_port }}"
+solum_image_builder_listen_port: "{{ solum_image_builder_port }}"
storm_nimbus_thrift_port: 6627
storm_supervisor_thrift_port: 6628
@@ -553,21 +642,46 @@ swift_rsync_port: "10873"
syslog_udp_port: "{{ fluentd_syslog_port }}"
+tacker_internal_fqdn: "{{ kolla_internal_fqdn }}"
+tacker_external_fqdn: "{{ kolla_external_fqdn }}"
tacker_server_port: "9890"
+tacker_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else tacker_server_port }}"
+tacker_server_listen_port: "{{ tacker_server_port }}"
+trove_internal_fqdn: "{{ kolla_internal_fqdn }}"
+trove_external_fqdn: "{{ kolla_external_fqdn }}"
trove_api_port: "8779"
+trove_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else trove_api_port }}"
trove_api_listen_port: "{{ trove_api_port }}"
+venus_internal_fqdn: "{{ kolla_internal_fqdn }}"
+venus_external_fqdn: "{{ kolla_external_fqdn }}"
venus_api_port: "10010"
+venus_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else venus_api_port }}"
+venus_api_listen_port: "{{ venus_api_port }}"
+watcher_internal_fqdn: "{{ kolla_internal_fqdn }}"
+watcher_external_fqdn: "{{ kolla_external_fqdn }}"
watcher_api_port: "9322"
+watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}"
+watcher_api_listen_port: "{{ watcher_api_port }}"
zun_api_port: "9517"
+zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}"
+zun_api_listen_port: "{{ zun_api_port }}"
+zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}"
+zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}"
zun_wsproxy_port: "6784"
zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
zun_cni_daemon_port: "9036"
+zun_internal_fqdn: "{{ kolla_internal_fqdn }}"
+zun_external_fqdn: "{{ kolla_external_fqdn }}"
+vitrage_internal_fqdn: "{{ kolla_internal_fqdn }}"
+vitrage_external_fqdn: "{{ kolla_external_fqdn }}"
vitrage_api_port: "8999"
+vitrage_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else vitrage_api_port }}"
+vitrage_api_listen_port: "{{ vitrage_api_port }}"
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}"
@@ -648,6 +762,7 @@ enable_haproxy_memcached: "no"
enable_aodh: "no"
enable_barbican: "no"
enable_blazar: "no"
+enable_caso: "no"
enable_ceilometer: "no"
enable_ceilometer_ipmi: "no"
enable_ceilometer_prometheus_pushgateway: "no"
@@ -707,6 +822,7 @@ enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool
enable_ironic_pxe_uefi: "no"
enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
enable_kuryr: "no"
+enable_letsencrypt: "no"
enable_magnum: "no"
enable_manila: "no"
enable_manila_backend_generic: "no"
@@ -823,6 +939,7 @@ osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_
rabbitmq_user: "openstack"
rabbitmq_monitoring_user: ""
outward_rabbitmq_user: "openstack"
+outward_rabbitmq_external_fqdn: "{{ kolla_external_fqdn }}"
# Whether to enable TLS encryption for RabbitMQ client-server communication.
rabbitmq_enable_tls: "no"
# CA certificate bundle in RabbitMQ container.
@@ -833,6 +950,8 @@ rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro i
####################
haproxy_user: "openstack"
haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}"
+haproxy_enable_http2: "yes"
+haproxy_http2_protocol: "alpn h2,http/1.1"
kolla_enable_tls_internal: "no"
kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}"
kolla_certificates_dir: "{{ node_config }}/certificates"
@@ -842,6 +961,8 @@ kolla_admin_openrc_cacert: ""
kolla_copy_ca_into_containers: "no"
haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
haproxy_backend_cacert_dir: "/etc/ssl/certs"
+haproxy_single_external_frontend: false
+haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_external | bool else '80' }}"
##################
# Backend options
@@ -860,7 +981,8 @@ kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem"
#####################
# ACME client options
#####################
-acme_client_servers: []
+acme_client_lego: "server lego {{ api_interface_address }}:{{ letsencrypt_webserver_port }}"
+acme_client_servers: "{% set arr = [] %}{% if enable_letsencrypt | bool %}{{ arr.append(acme_client_lego) }}{% endif %}{{ arr }}"
####################
# Keystone options
@@ -869,9 +991,9 @@ keystone_internal_fqdn: "{{ kolla_internal_fqdn }}"
keystone_external_fqdn: "{{ kolla_external_fqdn }}"
# TODO(yoctozepto): Remove after Zed. Kept for compatibility only.
-keystone_admin_url: "{{ keystone_internal_url }}"
-keystone_internal_url: "{{ internal_protocol }}://{{ keystone_internal_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
-keystone_public_url: "{{ public_protocol }}://{{ keystone_external_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
+keystone_admin_url: "{{ keystone_internal_fqdn | kolla_url(admin_protocol, keystone_admin_port) }}"
+keystone_internal_url: "{{ keystone_internal_fqdn | kolla_url(internal_protocol, keystone_internal_port) }}"
+keystone_public_url: "{{ keystone_external_fqdn | kolla_url(public_protocol, keystone_public_port) }}"
keystone_admin_user: "admin"
keystone_admin_project: "admin"
@@ -924,8 +1046,8 @@ glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bo
# NOTE(mnasiadka): For use in common role
glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
-glance_internal_endpoint: "{{ internal_protocol }}://{{ glance_internal_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}"
-glance_public_endpoint: "{{ public_protocol }}://{{ glance_external_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}"
+glance_internal_endpoint: "{{ glance_internal_fqdn | kolla_url(internal_protocol, glance_api_port) }}"
+glance_public_endpoint: "{{ glance_external_fqdn | kolla_url(public_protocol, glance_api_public_port) }}"
#######################
# Barbican options
@@ -934,8 +1056,8 @@ glance_public_endpoint: "{{ public_protocol }}://{{ glance_external_fqdn | put_a
barbican_crypto_plugin: "simple_crypto"
barbican_library_path: "/usr/lib/libCryptoki2_64.so"
-barbican_internal_endpoint: "{{ internal_protocol }}://{{ barbican_internal_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}"
-barbican_public_endpoint: "{{ public_protocol }}://{{ barbican_external_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}"
+barbican_internal_endpoint: "{{ barbican_internal_fqdn | kolla_url(internal_protocol, barbican_api_port) }}"
+barbican_public_endpoint: "{{ barbican_external_fqdn | kolla_url(public_protocol, barbican_api_public_port) }}"
#################
# Gnocchi options
@@ -985,8 +1107,8 @@ designate_backend_external_bind9_nameservers: ""
# Valid options are [ '', redis ]
designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}"
-designate_internal_endpoint: "{{ internal_protocol }}://{{ designate_internal_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}"
-designate_public_endpoint: "{{ public_protocol }}://{{ designate_external_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}"
+designate_internal_endpoint: "{{ designate_internal_fqdn | kolla_url(internal_protocol, designate_api_port) }}"
+designate_public_endpoint: "{{ designate_external_fqdn | kolla_url(public_protocol, designate_api_public_port) }}"
designate_enable_notifications_sink: "{{ enable_designate | bool }}"
designate_notifications_topic_name: "notifications_designate"
@@ -996,6 +1118,7 @@ designate_notifications_topic_name: "notifications_designate"
#######################
neutron_bgp_router_id: "1.1.1.1"
neutron_bridge_name: "{{ 'br-dvs' if neutron_plugin_agent == 'vmware_dvs' else 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}"
+neutron_physical_networks: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index }}{% if not loop.last %},{% endif %}{% endfor %}"
# Comma-separated type of enabled ml2 type drivers
neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}"
# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers')
@@ -1015,8 +1138,8 @@ neutron_legacy_iptables: "no"
# Enable distributed floating ip for OVN deployments
neutron_ovn_distributed_fip: "no"
-neutron_internal_endpoint: "{{ internal_protocol }}://{{ neutron_internal_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}"
-neutron_public_endpoint: "{{ public_protocol }}://{{ neutron_external_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}"
+neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}"
+neutron_public_endpoint: "{{ neutron_external_fqdn | kolla_url(public_protocol, neutron_server_public_port) }}"
# SRIOV physnet:interface mappings when SRIOV is enabled
# "sriovnet1" and tunnel_interface used here as placeholders
@@ -1074,8 +1197,8 @@ enable_nova_horizon_policy_file: "{{ enable_nova }}"
horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
-horizon_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port }}"
-horizon_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_external | bool else horizon_port }}"
+horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}"
+horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}"
###################
# External Ceph options
@@ -1148,6 +1271,8 @@ enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd
enable_prometheus_msteams: "no"
prometheus_alertmanager_user: "admin"
+prometheus_ceph_exporter_interval: "15s"
+prometheus_grafana_user: "grafana"
prometheus_scrape_interval: "60s"
prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}"
prometheus_openstack_exporter_timeout: "45s"
@@ -1159,6 +1284,9 @@ prometheus_openstack_exporter_compute_api_version: "latest"
prometheus_libvirt_exporter_interval: "60s"
prometheus_msteams_webhook_url:
+prometheus_public_endpoint: "{{ prometheus_external_fqdn | kolla_url(public_protocol, prometheus_public_port) }}"
+prometheus_internal_endpoint: "{{ prometheus_internal_fqdn | kolla_url(internal_protocol, prometheus_port) }}"
+
############
# Vitrage
############
@@ -1171,7 +1299,7 @@ enable_vitrage_prometheus_datasource: "{{ enable_prometheus | bool }}"
influxdb_address: "{{ kolla_internal_fqdn }}"
influxdb_datadir_volume: "influxdb"
-influxdb_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ influxdb_http_port }}"
+influxdb_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, influxdb_http_port) }}"
#########################
# Internal Image options
@@ -1201,20 +1329,20 @@ kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_
# telemetry data.
telegraf_enable_docker_input: "no"
-vitrage_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
-vitrage_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
+vitrage_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, vitrage_api_port) }}"
+vitrage_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, vitrage_api_public_port) }}"
####################
# Grafana
####################
-grafana_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
-grafana_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
+grafana_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, grafana_server_port) }}"
+grafana_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, grafana_server_public_port) }}"
#############
# Ironic
#############
-ironic_internal_endpoint: "{{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
-ironic_public_endpoint: "{{ public_protocol }}://{{ ironic_external_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
+ironic_internal_endpoint: "{{ ironic_internal_fqdn | kolla_url(internal_protocol, ironic_api_port) }}"
+ironic_public_endpoint: "{{ ironic_external_fqdn | kolla_url(public_protocol, ironic_api_public_port) }}"
# Valid options are [ '', redis, etcd ]
ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
@@ -1222,10 +1350,10 @@ ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if e
########
# Swift
########
-swift_internal_base_endpoint: "{{ internal_protocol }}://{{ swift_internal_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}"
+swift_internal_base_endpoint: "{{ swift_internal_fqdn | kolla_url(internal_protocol, swift_proxy_server_port) }}"
swift_internal_endpoint: "{{ swift_internal_base_endpoint }}/v1/AUTH_%(tenant_id)s"
-swift_public_endpoint: "{{ public_protocol }}://{{ swift_external_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}/v1/AUTH_%(tenant_id)s"
+swift_public_endpoint: "{{ swift_external_fqdn | kolla_url(public_protocol, swift_proxy_server_port, '/v1/AUTH_%(tenant_id)s') }}"
##########
# Octavia
@@ -1243,8 +1371,8 @@ octavia_auto_configure: "{{ 'amphora' in octavia_provider_drivers }}"
# on the Octavia woker nodes on the same provider network.
octavia_network_type: "provider"
-octavia_internal_endpoint: "{{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
-octavia_public_endpoint: "{{ public_protocol }}://{{ octavia_external_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
+octavia_internal_endpoint: "{{ octavia_internal_fqdn | kolla_url(internal_protocol, octavia_api_port) }}"
+octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, octavia_api_public_port) }}"
###################################
# Identity federation configuration
diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one
index 7f49624116..ef86e952a4 100644
--- a/ansible/inventory/all-in-one
+++ b/ansible/inventory/all-in-one
@@ -18,6 +18,10 @@ localhost ansible_connection=local
[deployment]
localhost ansible_connection=local
+# Caso
+[caso:children]
+monitoring
+
# You can explicitly specify which hosts run each project by updating the
# groups in the sections below. Common services are grouped together.
@@ -210,6 +214,9 @@ control
[venus:children]
monitoring
+[letsencrypt:children]
+loadbalancer
+
# Additional control implemented here. These groups allow you to control which
# services run on which hosts at a per-service level.
#
@@ -741,3 +748,9 @@ venus
[venus-manager:children]
venus
+
+[letsencrypt-webserver:children]
+letsencrypt
+
+[letsencrypt-lego:children]
+letsencrypt
diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode
index 929519b721..dd2da36b93 100644
--- a/ansible/inventory/multinode
+++ b/ansible/inventory/multinode
@@ -42,6 +42,10 @@ monitoring
[tls-backend:children]
control
+# Caso
+[caso:children]
+monitoring
+
# You can explicitly specify which hosts run each project by updating the
# groups in the sections below. Common services are grouped together.
@@ -228,6 +232,9 @@ control
[venus:children]
monitoring
+[letsencrypt:children]
+loadbalancer
+
# Additional control implemented here. These groups allow you to control which
# services run on which hosts at a per-service level.
#
@@ -760,3 +767,9 @@ venus
[venus-manager:children]
venus
+
+[letsencrypt-webserver:children]
+letsencrypt
+
+[letsencrypt-lego:children]
+letsencrypt
diff --git a/ansible/roles/aodh/defaults/main.yml b/ansible/roles/aodh/defaults/main.yml
index 19f56f09eb..e43c824810 100644
--- a/ansible/roles/aodh/defaults/main.yml
+++ b/ansible/roles/aodh/defaults/main.yml
@@ -19,7 +19,8 @@ aodh_services:
enabled: "{{ enable_aodh }}"
mode: "http"
external: true
- port: "{{ aodh_api_port }}"
+ external_fqdn: "{{ aodh_external_fqdn }}"
+ port: "{{ aodh_api_public_port }}"
listen_port: "{{ aodh_api_listen_port }}"
aodh-evaluator:
container_name: aodh_evaluator
@@ -207,8 +208,8 @@ aodh_notifier_extra_volumes: "{{ aodh_extra_volumes }}"
####################
# OpenStack
####################
-aodh_internal_endpoint: "{{ internal_protocol }}://{{ aodh_internal_fqdn | put_address_in_context('url') }}:{{ aodh_api_port }}"
-aodh_public_endpoint: "{{ public_protocol }}://{{ aodh_external_fqdn | put_address_in_context('url') }}:{{ aodh_api_port }}"
+aodh_internal_endpoint: "{{ aodh_internal_fqdn | kolla_url(internal_protocol, aodh_api_port) }}"
+aodh_public_endpoint: "{{ aodh_external_fqdn | kolla_url(public_protocol, aodh_api_public_port) }}"
aodh_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/barbican/defaults/main.yml b/ansible/roles/barbican/defaults/main.yml
index f8629a5a63..2930889805 100644
--- a/ansible/roles/barbican/defaults/main.yml
+++ b/ansible/roles/barbican/defaults/main.yml
@@ -20,7 +20,8 @@ barbican_services:
enabled: "{{ enable_barbican }}"
mode: "http"
external: true
- port: "{{ barbican_api_port }}"
+ external_fqdn: "{{ barbican_external_fqdn }}"
+ port: "{{ barbican_api_public_port }}"
listen_port: "{{ barbican_api_listen_port }}"
tls_backend: "{{ barbican_enable_tls_backend }}"
barbican-keystone-listener:
diff --git a/ansible/roles/blazar/defaults/main.yml b/ansible/roles/blazar/defaults/main.yml
index bdcf00bc5e..aef39c1158 100644
--- a/ansible/roles/blazar/defaults/main.yml
+++ b/ansible/roles/blazar/defaults/main.yml
@@ -14,11 +14,14 @@ blazar_services:
mode: "http"
external: false
port: "{{ blazar_api_port }}"
+ listen_port: "{{ blazar_api_listen_port }}"
blazar_api_external:
enabled: "{{ enable_blazar }}"
mode: "http"
external: true
- port: "{{ blazar_api_port }}"
+ external_fqdn: "{{ blazar_external_fqdn }}"
+ port: "{{ blazar_api_public_port }}"
+ listen_port: "{{ blazar_api_listen_port }}"
blazar-manager:
container_name: blazar_manager
group: blazar-manager
@@ -126,8 +129,8 @@ blazar_manager_extra_volumes: "{{ blazar_extra_volumes }}"
####################
# OpenStack
####################
-blazar_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ blazar_api_port }}/v1"
-blazar_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ blazar_api_port }}/v1"
+blazar_internal_endpoint: "{{ blazar_internal_fqdn | kolla_url(internal_protocol, blazar_api_port, '/v1') }}"
+blazar_public_endpoint: "{{ blazar_external_fqdn | kolla_url(public_protocol, blazar_api_public_port, '/v1') }}"
blazar_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/caso/defaults/main.yml b/ansible/roles/caso/defaults/main.yml
new file mode 100644
index 0000000000..6c51b8e516
--- /dev/null
+++ b/ansible/roles/caso/defaults/main.yml
@@ -0,0 +1,47 @@
+---
+caso_services:
+ caso:
+ container_name: caso
+ group: caso
+ enabled: true
+ image: "{{ caso_image_full }}"
+ volumes:
+ - "{{ node_config_directory }}/caso/:{{ container_config_directory }}/"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "caso_spool:/var/lib/caso"
+ - "caso_ssm_outgoing:/var/spool/apel/outgoing/openstack"
+ - "kolla_logs:/var/log/kolla/"
+ dimensions: "{{ caso_dimensions }}"
+
+####################
+# caso
+####################
+caso_site_name: "kolla_caso"
+caso_logging_debug: "{{ openstack_logging_debug }}"
+caso_log_dir: "/var/log/kolla/caso"
+caso_cron_table: "10 * * * *"
+caso_messengers:
+ - caso.messenger.logstash.LogstashMessenger
+
+####################
+# OpenStack
+####################
+caso_openstack_auth: "{{ openstack_auth }}"
+caso_keystone_user: "caso"
+caso_projects: []
+caso_ks_users_tmpl: >
+ {%- for project in caso_projects -%}
+ - project: "{{ project }}"
+ user: "{{ caso_keystone_user }}"
+ password: "{{ caso_keystone_password }}"
+ role: "admin"
+ {% endfor %}
+caso_ks_users: "{{ caso_ks_users_tmpl | from_yaml if caso_projects else [] }}"
+
+####################
+# Docker
+####################
+caso_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/caso"
+caso_tag: "{{ openstack_tag }}"
+caso_image_full: "{{ caso_image }}:{{ caso_tag }}"
+caso_dimensions: "{{ default_container_dimensions }}"
diff --git a/ansible/roles/caso/handlers/main.yml b/ansible/roles/caso/handlers/main.yml
new file mode 100644
index 0000000000..07cd0f24d4
--- /dev/null
+++ b/ansible/roles/caso/handlers/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Restart caso container
+ vars:
+ service_name: "caso"
+ service: "{{ caso_services[service_name] }}"
+ config_json: "{{ caso_config_jsons.results|selectattr('item.key', 'equalto', service_name)|first }}"
+ caso_container: "{{ check_caso_containers.results|selectattr('item.key', 'equalto', service_name)|first }}"
+ become: true
+ kolla_docker:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ when:
+ - kolla_action != "config"
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+ - config_json.changed | bool
+ or caso_conf.changed | bool
+ or caso_vom_conf.changed | bool
+ or caso_crontab.changed | bool
+ or caso_container.changed | bool
diff --git a/ansible/roles/caso/tasks/check.yml b/ansible/roles/caso/tasks/check.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/caso/tasks/check.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/caso/tasks/config.yml b/ansible/roles/caso/tasks/config.yml
new file mode 100644
index 0000000000..7e4d7eec3a
--- /dev/null
+++ b/ansible/roles/caso/tasks/config.yml
@@ -0,0 +1,90 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ register: caso_config_jsons
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Copying over caso config
+ merge_configs:
+ sources:
+ - "{{ role_path }}/templates/caso.conf.j2"
+ - "{{ node_custom_config }}//caso/caso.conf"
+ - "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/caso.conf"
+ dest: "{{ node_config_directory }}/{{ item.key }}/caso.conf"
+ mode: "0660"
+ become: true
+ register: caso_conf
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Copying over caso crontab
+ template:
+ src: "{{ role_path }}/templates/caso.crontab.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/caso.crontab"
+ mode: "0660"
+ become: true
+ register: caso_crontab
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Copying over caso voms file
+ template:
+ src: "{{ role_path }}/templates/voms.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/voms.json"
+ mode: "0660"
+ become: true
+ register: caso_vom_conf
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
+
+- name: Check caso containers
+ become: true
+ kolla_docker:
+ action: "compare_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ item.value.container_name }}"
+ image: "{{ item.value.image }}"
+ volumes: "{{ item.value.volumes }}"
+ dimensions: "{{ item.value.dimensions }}"
+ register: check_caso_containers
+ when:
+ - kolla_action != "config"
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
+ notify:
+ - Restart caso container
diff --git a/ansible/roles/caso/tasks/deploy.yml b/ansible/roles/caso/tasks/deploy.yml
new file mode 100644
index 0000000000..27c275b7a4
--- /dev/null
+++ b/ansible/roles/caso/tasks/deploy.yml
@@ -0,0 +1,12 @@
+---
+- include_tasks: register.yml
+ when: inventory_hostname in groups['caso']
+
+- include_tasks: config.yml
+ when: inventory_hostname in groups['caso']
+
+- name: Flush handlers
+ meta: flush_handlers
+
+- include_tasks: check.yml
+ when: inventory_hostname in groups['caso']
diff --git a/ansible/roles/caso/tasks/main.yml b/ansible/roles/caso/tasks/main.yml
new file mode 100644
index 0000000000..bc5d1e6257
--- /dev/null
+++ b/ansible/roles/caso/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/caso/tasks/precheck.yml b/ansible/roles/caso/tasks/precheck.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/caso/tasks/precheck.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/caso/tasks/pull.yml b/ansible/roles/caso/tasks/pull.yml
new file mode 100644
index 0000000000..5b08cc879a
--- /dev/null
+++ b/ansible/roles/caso/tasks/pull.yml
@@ -0,0 +1,11 @@
+---
+- name: Pulling caso images
+ become: true
+ kolla_docker:
+ action: "pull_image"
+ common_options: "{{ docker_common_options }}"
+ image: "{{ item.value.image }}"
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ caso_services }}"
diff --git a/ansible/roles/caso/tasks/reconfigure.yml b/ansible/roles/caso/tasks/reconfigure.yml
new file mode 100644
index 0000000000..f670a5b78d
--- /dev/null
+++ b/ansible/roles/caso/tasks/reconfigure.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: deploy.yml
diff --git a/ansible/roles/caso/tasks/register.yml b/ansible/roles/caso/tasks/register.yml
new file mode 100644
index 0000000000..fda4375c10
--- /dev/null
+++ b/ansible/roles/caso/tasks/register.yml
@@ -0,0 +1,7 @@
+---
+- import_role:
+ name: service-ks-register
+ vars:
+ service_ks_register_auth: "{{ caso_openstack_auth }}"
+ service_ks_register_users: "{{ caso_ks_users }}"
+ tags: always
diff --git a/ansible/roles/caso/tasks/upgrade.yml b/ansible/roles/caso/tasks/upgrade.yml
new file mode 100644
index 0000000000..375dcad19b
--- /dev/null
+++ b/ansible/roles/caso/tasks/upgrade.yml
@@ -0,0 +1,5 @@
+---
+- include_tasks: config.yml
+
+- name: Flush handlers
+ meta: flush_handlers
diff --git a/ansible/roles/caso/templates/caso.conf.j2 b/ansible/roles/caso/templates/caso.conf.j2
new file mode 100644
index 0000000000..8065a8690c
--- /dev/null
+++ b/ansible/roles/caso/templates/caso.conf.j2
@@ -0,0 +1,23 @@
+[DEFAULT]
+messengers = {{ caso_messengers|join(', ') }}
+site_name = {{ caso_site_name }}
+projects = {{ caso_projects|join(', ') }}
+debug = {{ caso_logging_debug }}
+log_file = caso.log
+log_dir = {{ caso_log_dir }}
+log_rotation_type = none
+spooldir = /var/lib/caso
+
+[keystone_auth]
+auth_type = password
+auth_url = {{ keystone_internal_url }}
+project_domain_id = {{ default_project_domain_id }}
+username = {{ caso_keystone_user }}
+user_domain_id = {{ default_user_domain_id }}
+password = {{ caso_keystone_password }}
+
+[logstash]
+port = {{ caso_tcp_output_port }}
+
+[ssm]
+output_path = /var/spool/apel/outgoing/openstack
diff --git a/ansible/roles/caso/templates/caso.crontab.j2 b/ansible/roles/caso/templates/caso.crontab.j2
new file mode 100644
index 0000000000..f406d808eb
--- /dev/null
+++ b/ansible/roles/caso/templates/caso.crontab.j2
@@ -0,0 +1 @@
+{{ caso_cron_table }} caso-extract --config-file /etc/caso/caso.conf
diff --git a/ansible/roles/caso/templates/caso.json.j2 b/ansible/roles/caso/templates/caso.json.j2
new file mode 100644
index 0000000000..949c4ca022
--- /dev/null
+++ b/ansible/roles/caso/templates/caso.json.j2
@@ -0,0 +1,41 @@
+{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
+{
+ "command": "{{ cron_cmd }}",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/caso.crontab",
+ "dest": "/var/spool/cron/caso",
+ "owner": "caso",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/caso.conf",
+ "dest": "/etc/caso/caso.conf",
+ "owner": "caso",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/voms.json",
+ "dest": "/etc/caso/voms.json",
+ "owner": "caso",
+ "perm": "0600"
+ }
+ ],
+ "permissions": [
+ {
+ "path": "/var/log/kolla/caso",
+ "owner": "caso:caso",
+ "recurse": true
+ },
+ {
+ "path": "/var/spool/apel/outgoing/openstack",
+ "owner": "caso:caso",
+ "recurse": true
+ },
+ {
+ "path": "/var/lib/caso",
+ "owner": "caso:caso",
+ "recurse": true
+ }
+ ]
+}
diff --git a/ansible/roles/caso/templates/voms.json.j2 b/ansible/roles/caso/templates/voms.json.j2
new file mode 100644
index 0000000000..559eccb765
--- /dev/null
+++ b/ansible/roles/caso/templates/voms.json.j2
@@ -0,0 +1,9 @@
+{
+ "VO FQAN": {
+ "projects": ["local tenant 1", "local tenant 2"]
+ },
+ "VO NAME": {
+ "projects": ["local tenant 3"]
+ }
+}
+
diff --git a/ansible/roles/ceph-rgw/defaults/main.yml b/ansible/roles/ceph-rgw/defaults/main.yml
index dfa53a9106..5957a8697d 100644
--- a/ansible/roles/ceph-rgw/defaults/main.yml
+++ b/ansible/roles/ceph-rgw/defaults/main.yml
@@ -16,7 +16,8 @@ ceph_rgw_services:
enabled: "{{ enable_ceph_rgw_loadbalancer | bool }}"
mode: "http"
external: true
- port: "{{ ceph_rgw_port }}"
+ external_fqdn: "{{ ceph_rgw_external_fqdn }}"
+ port: "{{ ceph_rgw_public_port }}"
custom_member_list: "{{ ceph_rgw_haproxy_members }}"
####################
@@ -59,8 +60,8 @@ ceph_rgw_swift_account_in_url: false
ceph_rgw_endpoint_path: "{{ '/' if ceph_rgw_swift_compatibility | bool else '/swift/' }}v1{% if ceph_rgw_swift_account_in_url | bool %}/AUTH_%(project_id)s{% endif %}"
-ceph_rgw_internal_endpoint: "{{ internal_protocol }}://{{ ceph_rgw_internal_fqdn | put_address_in_context('url') }}:{{ ceph_rgw_port }}{{ ceph_rgw_endpoint_path }}"
-ceph_rgw_public_endpoint: "{{ public_protocol }}://{{ ceph_rgw_external_fqdn | put_address_in_context('url') }}:{{ ceph_rgw_port }}{{ ceph_rgw_endpoint_path }}"
+ceph_rgw_internal_endpoint: "{{ ceph_rgw_internal_fqdn | kolla_url(internal_protocol, ceph_rgw_port, ceph_rgw_endpoint_path) }}"
+ceph_rgw_public_endpoint: "{{ ceph_rgw_external_fqdn | kolla_url(public_protocol, ceph_rgw_public_port, ceph_rgw_endpoint_path) }}"
ceph_rgw_keystone_user: "ceph_rgw"
diff --git a/ansible/roles/certificates/tasks/generate.yml b/ansible/roles/certificates/tasks/generate.yml
index b38f8ab41f..cf24d969ef 100644
--- a/ansible/roles/certificates/tasks/generate.yml
+++ b/ansible/roles/certificates/tasks/generate.yml
@@ -67,6 +67,7 @@
dest: "{{ kolla_external_fqdn_cert }}"
mode: "0660"
when:
+ - not enable_letsencrypt | bool
- kolla_enable_tls_external | bool
- block:
@@ -77,6 +78,7 @@
remote_src: yes
mode: "0660"
when:
+ - not enable_letsencrypt | bool
- kolla_enable_tls_external | bool
- kolla_enable_tls_internal | bool
- kolla_same_external_internal_vip | bool
@@ -137,5 +139,6 @@
dest: "{{ kolla_internal_fqdn_cert }}"
mode: "0660"
when:
+ - not enable_letsencrypt | bool
- kolla_enable_tls_internal | bool
- not kolla_same_external_internal_vip | bool
diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml
index e1ec9dc8d5..2a5c6c2199 100644
--- a/ansible/roles/cinder/defaults/main.yml
+++ b/ansible/roles/cinder/defaults/main.yml
@@ -20,7 +20,8 @@ cinder_services:
enabled: "{{ enable_cinder }}"
mode: "http"
external: true
- port: "{{ cinder_api_port }}"
+ external_fqdn: "{{ cinder_external_fqdn }}"
+ port: "{{ cinder_api_public_port }}"
listen_port: "{{ cinder_api_listen_port }}"
tls_backend: "{{ cinder_enable_tls_backend }}"
cinder-scheduler:
@@ -209,8 +210,8 @@ cinder_enable_conversion_tmpfs: false
####################
# OpenStack
####################
-cinder_internal_base_endpoint: "{{ internal_protocol }}://{{ cinder_internal_fqdn | put_address_in_context('url') }}:{{ cinder_api_port }}"
-cinder_public_base_endpoint: "{{ public_protocol }}://{{ cinder_external_fqdn | put_address_in_context('url') }}:{{ cinder_api_port }}"
+cinder_internal_base_endpoint: "{{ cinder_internal_fqdn | kolla_url(internal_protocol, cinder_api_port) }}"
+cinder_public_base_endpoint: "{{ cinder_external_fqdn | kolla_url(public_protocol, cinder_api_public_port) }}"
cinder_v3_internal_endpoint: "{{ cinder_internal_base_endpoint }}/v3/%(tenant_id)s"
cinder_v3_public_endpoint: "{{ cinder_public_base_endpoint }}/v3/%(tenant_id)s"
diff --git a/ansible/roles/cloudkitty/defaults/main.yml b/ansible/roles/cloudkitty/defaults/main.yml
index 5231b2f22a..0735b4a67e 100644
--- a/ansible/roles/cloudkitty/defaults/main.yml
+++ b/ansible/roles/cloudkitty/defaults/main.yml
@@ -14,11 +14,14 @@ cloudkitty_services:
mode: "http"
external: false
port: "{{ cloudkitty_api_port }}"
+ listen_port: "{{ cloudkitty_api_listen_port }}"
cloudkitty_api_external:
enabled: "{{ enable_cloudkitty }}"
mode: "http"
external: true
- port: "{{ cloudkitty_api_port }}"
+ external_fqdn: "{{ cloudkitty_external_fqdn }}"
+ port: "{{ cloudkitty_api_public_port }}"
+ listen_port: "{{ cloudkitty_api_listen_port }}"
cloudkitty-processor:
container_name: "cloudkitty_processor"
group: "cloudkitty-processor"
@@ -118,8 +121,8 @@ cloudkitty_api_extra_volumes: "{{ cloudkitty_extra_volumes }}"
####################
# OpenStack
####################
-cloudkitty_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ cloudkitty_api_port }}"
-cloudkitty_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ cloudkitty_api_port }}"
+cloudkitty_internal_endpoint: "{{ cloudkitty_internal_fqdn | kolla_url(internal_protocol, cloudkitty_api_port) }}"
+cloudkitty_public_endpoint: "{{ cloudkitty_external_fqdn | kolla_url(public_protocol, cloudkitty_api_public_port) }}"
cloudkitty_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2 b/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
index 29459549ea..b2f209c2ba 100644
--- a/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
+++ b/ansible/roles/cloudkitty/templates/cloudkitty.conf.j2
@@ -72,6 +72,8 @@ region_name = {{ openstack_region_name }}
{% if cloudkitty_collector_backend == "prometheus" %}
[collector_prometheus]
prometheus_url = {{ cloudkitty_prometheus_url }}
+prometheus_user = admin
+prometheus_password = {{ prometheus_password }}
{% if cloudkitty_prometheus_cafile is defined %}
cafile = {{ cloudkitty_prometheus_cafile }}
@@ -96,6 +98,8 @@ region_name = {{ openstack_region_name }}
metric = openstack_identity_project_info
scope_attribute = id
prometheus_url = {{ cloudkitty_prometheus_url }}
+prometheus_user = admin
+prometheus_password = {{ prometheus_password }}
{% if cloudkitty_prometheus_cafile is defined %}
cafile = {{ cloudkitty_prometheus_cafile }}
diff --git a/ansible/roles/common/tasks/config.yml b/ansible/roles/common/tasks/config.yml
index 77213486f1..3361b7a573 100644
--- a/ansible/roles/common/tasks/config.yml
+++ b/ansible/roles/common/tasks/config.yml
@@ -86,6 +86,8 @@
default_input_files:
- name: "conf/input/00-global.conf.j2"
enabled: true
+ - name: "conf/input/99-caso.conf.j2"
+ enabled: true
- name: "conf/input/01-syslog.conf.j2"
enabled: true
- name: "conf/input/02-mariadb.conf.j2"
@@ -100,6 +102,8 @@
enabled: "{{ enable_prometheus_fluentd_integration | bool }}"
- name: "conf/input/10-openvswitch.conf.j2"
enabled: true
+ - name: "conf/input/11-letsencrypt.conf.j2"
+ enabled: "{{ enable_letsencrypt | bool }}"
customised_input_files: "{{ find_custom_fluentd_inputs.files | map(attribute='path') | list }}"
# Filters
fluentd_filter_files: "{{ default_filter_files | customise_fluentd(customised_filter_files) }}"
@@ -148,6 +152,7 @@
- { name: "barbican", enabled: "{{ enable_barbican | bool }}" }
- { name: "blazar", enabled: "{{ enable_blazar | bool }}" }
- { name: "ceilometer", enabled: "{{ enable_ceilometer | bool }}" }
+ - { name: "caso", enabled: "{{ enable_caso | bool }}" }
- { name: "cinder", enabled: "{{ enable_cinder | bool }}" }
- { name: "cloudkitty", enabled: "{{ enable_cloudkitty | bool }}" }
- { name: "collectd", enabled: "{{ enable_collectd | bool }}" }
diff --git a/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2 b/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
index ffdf37c27b..4fb8fa3487 100644
--- a/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
+++ b/ansible/roles/common/templates/conf/filter/00-record_transformer.conf.j2
@@ -50,3 +50,12 @@
log_level ${tag_parts[1]}
+
+{% if enable_caso | bool and inventory_hostname in groups['caso'] %}
+
+ @type parser
+ format json
+ key_name Payload
+ reserve_data true
+
+{% endif %}
\ No newline at end of file
diff --git a/ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2 b/ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2
new file mode 100644
index 0000000000..ad2f071028
--- /dev/null
+++ b/ansible/roles/common/templates/conf/input/11-letsencrypt.conf.j2
@@ -0,0 +1,15 @@
+
diff --git a/ansible/roles/common/templates/conf/input/99-caso.conf.j2 b/ansible/roles/common/templates/conf/input/99-caso.conf.j2
new file mode 100644
index 0000000000..5c577de410
--- /dev/null
+++ b/ansible/roles/common/templates/conf/input/99-caso.conf.j2
@@ -0,0 +1,8 @@
+
diff --git a/ansible/roles/common/templates/conf/output/03-opensearch.conf.j2 b/ansible/roles/common/templates/conf/output/03-opensearch.conf.j2
index e40b3f98cb..79d422bd3f 100644
--- a/ansible/roles/common/templates/conf/output/03-opensearch.conf.j2
+++ b/ansible/roles/common/templates/conf/output/03-opensearch.conf.j2
@@ -1,3 +1,17 @@
+{% if enable_caso | bool and inventory_hostname in groups['caso'] %}
+
+ @type copy
+
+ @type opensearch
+ host {{ opensearch_address }}
+ port {{ opensearch_port }}
+ logstash_format true
+ logstash_prefix apel
+ flush_interval 15s
+
+
+{% endif %}
+
@type copy
diff --git a/ansible/roles/common/templates/cron-logrotate-caso.conf.j2 b/ansible/roles/common/templates/cron-logrotate-caso.conf.j2
new file mode 100644
index 0000000000..2d4642e4b5
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-caso.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/caso/*.log"
+{
+}
diff --git a/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2 b/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2
new file mode 100644
index 0000000000..fea08e0163
--- /dev/null
+++ b/ansible/roles/common/templates/cron-logrotate-letsencrypt.conf.j2
@@ -0,0 +1,3 @@
+"/var/log/kolla/letsencrypt/*.log"
+{
+}
diff --git a/ansible/roles/cyborg/defaults/main.yml b/ansible/roles/cyborg/defaults/main.yml
index f736b341f2..c62f3b9117 100644
--- a/ansible/roles/cyborg/defaults/main.yml
+++ b/ansible/roles/cyborg/defaults/main.yml
@@ -141,8 +141,8 @@ cyborg_conductor_extra_volumes: "{{ cyborg_extra_volumes }}"
####################
# OpenStack
####################
-cyborg_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ cyborg_api_port }}/v2"
-cyborg_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ cyborg_api_port }}/v2"
+cyborg_internal_endpoint: "{{ cyborg_internal_fqdn | kolla_url(internal_protocol, cyborg_api_port, '/v2') }}"
+cyborg_public_endpoint: "{{ cyborg_external_fqdn | kolla_url(public_protocol, cyborg_api_port, '/v2') }}"
cyborg_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/designate/defaults/main.yml b/ansible/roles/designate/defaults/main.yml
index 212b6dc7d8..ed66c8bab5 100644
--- a/ansible/roles/designate/defaults/main.yml
+++ b/ansible/roles/designate/defaults/main.yml
@@ -19,7 +19,8 @@ designate_services:
enabled: "{{ enable_designate }}"
mode: "http"
external: true
- port: "{{ designate_api_port }}"
+ external_fqdn: "{{ designate_external_fqdn }}"
+ port: "{{ designate_api_public_port }}"
listen_port: "{{ designate_api_listen_port }}"
designate-backend-bind9:
container_name: designate_backend_bind9
diff --git a/ansible/roles/freezer/defaults/main.yml b/ansible/roles/freezer/defaults/main.yml
index 716fdc4326..27390cd63c 100644
--- a/ansible/roles/freezer/defaults/main.yml
+++ b/ansible/roles/freezer/defaults/main.yml
@@ -13,11 +13,14 @@ freezer_services:
mode: "http"
external: false
port: "{{ freezer_api_port }}"
+ listen_port: "{{ freezer_api_listen_port }}"
freezer_api_external:
enabled: "{{ enable_freezer }}"
mode: "http"
external: true
- port: "{{ freezer_api_port }}"
+ external_fqdn: "{{ freezer_external_fqdn }}"
+ port: "{{ freezer_api_public_port }}"
+ listen_port: "{{ freezer_api_listen_port }}"
freezer-scheduler:
container_name: freezer_scheduler
group: freezer-scheduler
@@ -97,8 +100,8 @@ freezer_scheduler_extra_volumes: "{{ freezer_extra_volumes }}"
####################
# OpenStack
####################
-freezer_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ freezer_api_port }}"
-freezer_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ freezer_api_port }}"
+freezer_internal_endpoint: "{{ freezer_internal_fqdn | kolla_url(internal_protocol, freezer_api_port) }}"
+freezer_public_endpoint: "{{ freezer_external_fqdn | kolla_url(public_protocol, freezer_api_public_port) }}"
freezer_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/glance/defaults/main.yml b/ansible/roles/glance/defaults/main.yml
index 0236071b01..6158ef4601 100644
--- a/ansible/roles/glance/defaults/main.yml
+++ b/ansible/roles/glance/defaults/main.yml
@@ -26,7 +26,8 @@ glance_services:
enabled: "{{ enable_glance | bool and not glance_enable_tls_backend | bool }}"
mode: "http"
external: true
- port: "{{ glance_api_port }}"
+ external_fqdn: "{{ glance_external_fqdn }}"
+ port: "{{ glance_api_public_port }}"
frontend_http_extra:
- "timeout client {{ haproxy_glance_api_client_timeout }}"
backend_http_extra:
@@ -57,7 +58,8 @@ glance_services:
enabled: "{{ enable_glance | bool and glance_enable_tls_backend | bool }}"
mode: "http"
external: true
- port: "{{ glance_api_port }}"
+ external_fqdn: "{{ glance_external_fqdn }}"
+ port: "{{ glance_api_public_port }}"
frontend_http_extra:
- "timeout client {{ haproxy_glance_api_client_timeout }}"
backend_http_extra:
diff --git a/ansible/roles/gnocchi/defaults/main.yml b/ansible/roles/gnocchi/defaults/main.yml
index dbb5ef5e34..d6f3111fc5 100644
--- a/ansible/roles/gnocchi/defaults/main.yml
+++ b/ansible/roles/gnocchi/defaults/main.yml
@@ -13,12 +13,15 @@ gnocchi_services:
enabled: "{{ enable_gnocchi }}"
mode: "http"
external: false
- port: "{{ gnocchi_api_listen_port }}"
+ port: "{{ gnocchi_api_port }}"
+ listen_port: "{{ gnocchi_api_listen_port }}"
gnocchi_api_external:
enabled: "{{ enable_gnocchi }}"
mode: "http"
external: true
- port: "{{ gnocchi_api_listen_port }}"
+ external_fqdn: "{{ gnocchi_external_fqdn }}"
+ port: "{{ gnocchi_api_public_port }}"
+ listen_port: "{{ gnocchi_api_listen_port }}"
gnocchi-metricd:
container_name: gnocchi_metricd
group: gnocchi-metricd
@@ -160,8 +163,8 @@ gnocchi_statsd_extra_volumes: "{{ gnocchi_extra_volumes }}"
####################
# OpenStack
####################
-gnocchi_internal_endpoint: "{{ internal_protocol }}://{{ gnocchi_internal_fqdn | put_address_in_context('url') }}:{{ gnocchi_api_port }}"
-gnocchi_public_endpoint: "{{ public_protocol }}://{{ gnocchi_external_fqdn | put_address_in_context('url') }}:{{ gnocchi_api_port }}"
+gnocchi_internal_endpoint: "{{ gnocchi_internal_fqdn | kolla_url(internal_protocol, gnocchi_api_port) }}"
+gnocchi_public_endpoint: "{{ gnocchi_external_fqdn | kolla_url(public_protocol, gnocchi_api_public_port) }}"
gnocchi_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/grafana/defaults/main.yml b/ansible/roles/grafana/defaults/main.yml
index ca7d784936..eae1f03fb4 100644
--- a/ansible/roles/grafana/defaults/main.yml
+++ b/ansible/roles/grafana/defaults/main.yml
@@ -13,11 +13,14 @@ grafana_services:
mode: "http"
external: false
port: "{{ grafana_server_port }}"
+ listen_port: "{{ grafana_server_listen_port }}"
grafana_server_external:
enabled: "{{ enable_grafana_external | bool }}"
mode: "http"
external: true
- port: "{{ grafana_server_port }}"
+ external_fqdn: "{{ grafana_external_fqdn }}"
+ port: "{{ grafana_server_public_port }}"
+ listen_port: "{{ grafana_server_listen_port }}"
####################
# Database
@@ -62,10 +65,11 @@ grafana_data_sources:
access: "proxy"
url: "{{ opensearch_internal_endpoint }}"
jsonData:
- flavor: "elasticsearch"
- database: "[flog-]YYYY.MM.DD"
- version: "7.0.0"
+ flavor: "OpenSearch"
+ database: "{{ opensearch_log_index_prefix }}-*"
+ version: "2.11.1"
timeField: "@timestamp"
+ logLevelField: "log_level"
##########
# Grafana
@@ -80,13 +84,15 @@ grafana_default_volumes:
- "{{ node_config_directory }}/grafana/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- - "grafana:/var/lib/grafana/"
- "kolla_logs:/var/log/kolla/"
grafana_extra_volumes: "{{ default_extra_volumes }}"
grafana_start_first_node_delay: 10
grafana_start_first_node_retries: 12
+# TODO(dawudm): make this True in the D release
+grafana_remove_old_volume: false
+
############
# Prometheus
############
diff --git a/ansible/roles/grafana/tasks/post_config.yml b/ansible/roles/grafana/tasks/post_config.yml
index a59a689ceb..071546dd6e 100644
--- a/ansible/roles/grafana/tasks/post_config.yml
+++ b/ansible/roles/grafana/tasks/post_config.yml
@@ -13,6 +13,13 @@
delay: 2
run_once: true
+- name: Remove old grafana docker volume
+ become: true
+ kolla_docker:
+ action: "remove_volume"
+ name: grafana
+ when: grafana_remove_old_volume | bool
+
- name: Enable grafana datasources
become: true
kolla_toolbox:
diff --git a/ansible/roles/grafana/templates/prometheus.yaml.j2 b/ansible/roles/grafana/templates/prometheus.yaml.j2
index a0b1a4ae19..5615638fb5 100644
--- a/ansible/roles/grafana/templates/prometheus.yaml.j2
+++ b/ansible/roles/grafana/templates/prometheus.yaml.j2
@@ -4,6 +4,10 @@ datasources:
- name: Prometheus
type: prometheus
access: proxy
+ basicAuth: true
+ basicAuthUser: "{{ prometheus_grafana_user }}"
+ secureJsonData:
+ basicAuthPassword: "{{ prometheus_grafana_password }}"
orgId: 1
url: {{ grafana_prometheus_url }}
version: 1
diff --git a/ansible/roles/haproxy-config/defaults/main.yml b/ansible/roles/haproxy-config/defaults/main.yml
index d6456bd2da..ea72d59906 100644
--- a/ansible/roles/haproxy-config/defaults/main.yml
+++ b/ansible/roles/haproxy-config/defaults/main.yml
@@ -5,6 +5,7 @@ haproxy_service_template: "haproxy_single_service_split.cfg.j2"
haproxy_frontend_http_extra:
- "option httplog"
- "option forwardfor"
+haproxy_frontend_redirect_extra: []
haproxy_frontend_tcp_extra:
- "option tcplog"
haproxy_backend_http_extra: []
diff --git a/ansible/roles/haproxy-config/tasks/main.yml b/ansible/roles/haproxy-config/tasks/main.yml
index 2505298905..1e3f293adb 100644
--- a/ansible/roles/haproxy-config/tasks/main.yml
+++ b/ansible/roles/haproxy-config/tasks/main.yml
@@ -22,6 +22,36 @@
notify:
- Restart haproxy container
+- name: "Add configuration for {{ project_name }} when using single external frontend"
+ vars:
+ service: "{{ item.value }}"
+ blockinfile:
+ create: yes
+ path: "{{ node_config_directory }}/haproxy/external-frontend-map"
+ insertafter: EOF
+ marker: "# {mark} {{ item.key }}"
+ mode: "0660"
+ block: |
+ {%- set haproxy = service.haproxy | default({}) %}
+ {%- for haproxy_name, haproxy_service in haproxy.items() %}
+ {% set external = haproxy_service.external | default(false) | bool %}
+ {% set enabled = haproxy_service.enabled | default(false) | bool %}
+ {% set with_frontend = haproxy_service.with_frontend | default(true) | bool %}
+ {% set mode = haproxy_service.mode | default('http') %}
+ {%- if external and with_frontend and enabled and mode == 'http' %}
+ {{ haproxy_service.external_fqdn }} {{ haproxy_name }}_back
+ {% endif -%}
+ {%- endfor -%}
+ become: true
+ with_dict: "{{ project_services }}"
+ when:
+ - haproxy_single_external_frontend | bool
+ - service.enabled | bool
+ - service.haproxy is defined
+ - enable_haproxy | bool
+ notify:
+ - Restart haproxy container
+
- name: "Configuring firewall for {{ project_name }}"
firewalld:
immediate: true
diff --git a/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2 b/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2
index ea4e3321ff..124c04dc92 100644
--- a/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2
+++ b/ansible/roles/haproxy-config/templates/haproxy_single_service_split.cfg.j2
@@ -1,6 +1,8 @@
#jinja2: lstrip_blocks: True
-{%- set external_tls_bind_info = 'ssl crt /etc/haproxy/haproxy.pem' if kolla_enable_tls_external|bool else '' %}
-{%- set internal_tls_bind_info = 'ssl crt /etc/haproxy/haproxy-internal.pem' if kolla_enable_tls_internal|bool else '' %}
+{%- set external_tls_bind_info = 'ssl crt /etc/haproxy/certificates/haproxy.pem' if kolla_enable_tls_external|bool else '' %}
+{%- set external_tls_bind_info = "%s %s" % (external_tls_bind_info, haproxy_http2_protocol) if kolla_enable_tls_external|bool and haproxy_enable_http2|bool else external_tls_bind_info %}
+{%- set internal_tls_bind_info = 'ssl crt /etc/haproxy/certificates/haproxy-internal.pem' if kolla_enable_tls_internal|bool else '' %}
+{%- set internal_tls_bind_info = "%s %s" % (internal_tls_bind_info, haproxy_http2_protocol) if kolla_enable_tls_internal|bool and haproxy_enable_http2|bool else internal_tls_bind_info %}
{%- macro userlist_macro(service_name, auth_user, auth_pass) %}
userlist {{ service_name }}-user
@@ -8,7 +10,7 @@ userlist {{ service_name }}-user
{% endmacro %}
{%- macro frontend_macro(service_name, service_port, service_mode, external,
- frontend_http_extra, frontend_tcp_extra) %}
+ frontend_http_extra, frontend_redirect_extra, frontend_tcp_extra) %}
frontend {{ service_name }}_front
{% if service_mode == 'redirect' %}
mode http
@@ -48,7 +50,10 @@ frontend {{ service_name }}_front
{{ "bind %s:%s %s"|e|format(vip_address, service_port, tls_option)|trim() }}
{# Redirect mode sets a redirect scheme instead of a backend #}
{% if service_mode == 'redirect' %}
- redirect scheme https code 301 if !{ ssl_fc }
+ redirect scheme https code 301 if !{ ssl_fc } !{ path_reg ^/.well-known/acme-challenge/.+ }
+ {% for redirect_option in frontend_redirect_extra %}
+ {{ redirect_option }}
+ {% endfor %}
{% else %}
default_backend {{ service_name }}_back
{% endif %}
@@ -131,6 +136,7 @@ backend {{ service_name }}_back
{% set frontend_tcp_extra = haproxy_service.frontend_tcp_extra|default([]) + haproxy_frontend_tcp_extra %}
{% set backend_tcp_extra = haproxy_service.backend_tcp_extra|default([]) %}
{% set frontend_http_extra = haproxy_service.frontend_http_extra|default([]) + haproxy_frontend_http_extra %}
+ {% set frontend_redirect_extra = haproxy_service.frontend_redirect_extra|default([]) + haproxy_frontend_redirect_extra %}
{% set backend_http_extra = haproxy_service.backend_http_extra|default([]) %}
{% set tls_backend = haproxy_service.tls_backend|default(false) %}
{# Allow for basic auth #}
@@ -140,8 +146,10 @@ backend {{ service_name }}_back
{{ userlist_macro(haproxy_name, auth_user, auth_pass) }}
{% endif %}
{% if with_frontend %}
+ {% if not (external|bool and haproxy_single_external_frontend|bool and mode == 'http') %}
{{ frontend_macro(haproxy_name, haproxy_service.port, mode, external,
- frontend_http_extra, frontend_tcp_extra) }}
+ frontend_http_extra, frontend_redirect_extra, frontend_tcp_extra) }}
+ {% endif %}
{% endif %}
{# Redirect (to https) is a special case, as it does not include a backend #}
{% if with_backend and mode != 'redirect' %}
diff --git a/ansible/roles/heat/defaults/main.yml b/ansible/roles/heat/defaults/main.yml
index bead3e918d..6f230c8156 100644
--- a/ansible/roles/heat/defaults/main.yml
+++ b/ansible/roles/heat/defaults/main.yml
@@ -20,7 +20,8 @@ heat_services:
enabled: "{{ enable_heat }}"
mode: "http"
external: true
- port: "{{ heat_api_port }}"
+ external_fqdn: "{{ heat_external_fqdn }}"
+ port: "{{ heat_api_public_port }}"
listen_port: "{{ heat_api_listen_port }}"
tls_backend: "{{ heat_enable_tls_backend }}"
heat-api-cfn:
@@ -43,7 +44,8 @@ heat_services:
enabled: "{{ enable_heat }}"
mode: "http"
external: true
- port: "{{ heat_api_cfn_port }}"
+ external_fqdn: "{{ heat_cfn_external_fqdn }}"
+ port: "{{ heat_api_cfn_public_port }}"
listen_port: "{{ heat_api_cfn_listen_port }}"
tls_backend: "{{ heat_enable_tls_backend }}"
heat-engine:
@@ -170,12 +172,12 @@ heat_engine_extra_volumes: "{{ heat_extra_volumes }}"
####################
# OpenStack
####################
-heat_internal_endpoint: "{{ internal_protocol }}://{{ heat_internal_fqdn | put_address_in_context('url') }}:{{ heat_api_port }}/v1/%(tenant_id)s"
-heat_public_endpoint: "{{ public_protocol }}://{{ heat_external_fqdn | put_address_in_context('url') }}:{{ heat_api_port }}/v1/%(tenant_id)s"
+heat_internal_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port, '/v1/%(tenant_id)s') }}"
+heat_public_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port, '/v1/%(tenant_id)s') }}"
-heat_cfn_public_base_endpoint: "{{ public_protocol }}://{{ heat_cfn_external_fqdn | put_address_in_context('url') }}:{{ heat_api_cfn_port }}"
+heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | kolla_url(public_protocol, heat_api_cfn_public_port) }}"
-heat_cfn_internal_endpoint: "{{ internal_protocol }}://{{ heat_cfn_internal_fqdn | put_address_in_context('url') }}:{{ heat_api_cfn_port }}/v1"
+heat_cfn_internal_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal_protocol, heat_api_cfn_port, '/v1') }}"
heat_cfn_public_endpoint: "{{ heat_cfn_public_base_endpoint }}/v1"
heat_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml
index 1ed0373524..c1c5dc9791 100644
--- a/ansible/roles/horizon/defaults/main.yml
+++ b/ansible/roles/horizon/defaults/main.yml
@@ -49,10 +49,13 @@ horizon_services:
external: false
port: "{{ horizon_port }}"
listen_port: "{{ horizon_listen_port }}"
+ frontend_redirect_extra:
+ - "use_backend acme_client_back if { path_reg ^/.well-known/acme-challenge/.+ }"
horizon_external:
enabled: "{{ enable_horizon }}"
mode: "http"
external: true
+ external_fqdn: "{{ horizon_external_fqdn }}"
port: "{% if kolla_enable_tls_external | bool %}{{ horizon_tls_port }}{% else %}{{ horizon_port }}{% endif %}"
listen_port: "{{ horizon_listen_port }}"
frontend_http_extra:
@@ -64,8 +67,11 @@ horizon_services:
enabled: "{{ enable_horizon | bool and kolla_enable_tls_external | bool }}"
mode: "redirect"
external: true
+ external_fqdn: "{{ horizon_external_fqdn }}"
port: "{{ horizon_port }}"
listen_port: "{{ horizon_listen_port }}"
+ frontend_redirect_extra:
+ - "use_backend acme_client_back if { path_reg ^/.well-known/acme-challenge/.+ }"
acme_client:
enabled: "{{ enable_horizon }}"
with_frontend: false
diff --git a/ansible/roles/ironic/defaults/main.yml b/ansible/roles/ironic/defaults/main.yml
index 7f10606d40..f017438d96 100644
--- a/ansible/roles/ironic/defaults/main.yml
+++ b/ansible/roles/ironic/defaults/main.yml
@@ -20,7 +20,8 @@ ironic_services:
enabled: "{{ enable_ironic }}"
mode: "http"
external: true
- port: "{{ ironic_api_port }}"
+ external_fqdn: "{{ ironic_external_fqdn }}"
+ port: "{{ ironic_api_public_port }}"
listen_port: "{{ ironic_api_listen_port }}"
tls_backend: "{{ ironic_enable_tls_backend }}"
ironic-conductor:
@@ -52,7 +53,8 @@ ironic_services:
enabled: "{{ enable_ironic }}"
mode: "http"
external: true
- port: "{{ ironic_inspector_port }}"
+ external_fqdn: "{{ ironic_inspector_external_fqdn }}"
+ port: "{{ ironic_inspector_public_port }}"
listen_port: "{{ ironic_inspector_listen_port }}"
ironic-tftp:
container_name: ironic_tftp
@@ -259,8 +261,8 @@ ironic_dnsmasq_extra_volumes: "{{ ironic_extra_volumes }}"
####################
ironic_inspector_keystone_user: "ironic-inspector"
-ironic_inspector_internal_endpoint: "{{ internal_protocol }}://{{ ironic_inspector_internal_fqdn | put_address_in_context('url') }}:{{ ironic_inspector_port }}"
-ironic_inspector_public_endpoint: "{{ public_protocol }}://{{ ironic_inspector_external_fqdn | put_address_in_context('url') }}:{{ ironic_inspector_port }}"
+ironic_inspector_internal_endpoint: "{{ ironic_inspector_internal_fqdn | kolla_url(internal_protocol, ironic_inspector_port) }}"
+ironic_inspector_public_endpoint: "{{ ironic_inspector_external_fqdn | kolla_url(public_protocol, ironic_inspector_public_port) }}"
ironic_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/keystone/defaults/main.yml b/ansible/roles/keystone/defaults/main.yml
index 59275d658f..4d29dd4a85 100644
--- a/ansible/roles/keystone/defaults/main.yml
+++ b/ansible/roles/keystone/defaults/main.yml
@@ -14,14 +14,15 @@ keystone_services:
mode: "http"
external: false
tls_backend: "{{ keystone_enable_tls_backend }}"
- port: "{{ keystone_public_port }}"
- listen_port: "{{ keystone_public_listen_port }}"
+ port: "{{ keystone_internal_port }}"
+ listen_port: "{{ keystone_internal_listen_port }}"
backend_http_extra:
- balance "{{ 'source' if enable_keystone_federation | bool else 'roundrobin' }}"
keystone_external:
enabled: "{{ enable_keystone }}"
mode: "http"
external: true
+ external_fqdn: "{{ keystone_external_fqdn }}"
tls_backend: "{{ keystone_enable_tls_backend }}"
port: "{{ keystone_public_port }}"
listen_port: "{{ keystone_public_listen_port }}"
diff --git a/ansible/roles/letsencrypt/defaults/main.yml b/ansible/roles/letsencrypt/defaults/main.yml
new file mode 100644
index 0000000000..4d41fe9d08
--- /dev/null
+++ b/ansible/roles/letsencrypt/defaults/main.yml
@@ -0,0 +1,60 @@
+---
+letsencrypt_services:
+ letsencrypt-lego:
+ container_name: letsencrypt_lego
+ group: letsencrypt-lego
+ enabled: true
+ image: "{{ letsencrypt_lego_image_full }}"
+ volumes: "{{ letsencrypt_lego_default_volumes + letsencrypt_lego_extra_volumes }}"
+ dimensions: "{{ letsencrypt_lego_dimensions }}"
+ letsencrypt-webserver:
+ container_name: letsencrypt_webserver
+ group: letsencrypt-webserver
+ enabled: true
+ image: "{{ letsencrypt_webserver_image_full }}"
+ volumes: "{{ letsencrypt_webserver_default_volumes + letsencrypt_webserver_extra_volumes }}"
+ dimensions: "{{ letsencrypt_webserver_dimensions }}"
+
+
+##############
+# LetsEncrypt
+##############
+letsencrypt_tag: "{{ openstack_tag }}"
+letsencrypt_logging_debug: "{{ openstack_logging_debug }}"
+
+letsencrypt_lego_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/letsencrypt-lego"
+letsencrypt_lego_tag: "{{ letsencrypt_tag }}"
+letsencrypt_lego_image_full: "{{ letsencrypt_lego_image }}:{{ letsencrypt_lego_tag }}"
+
+letsencrypt_webserver_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/letsencrypt-webserver"
+letsencrypt_webserver_tag: "{{ letsencrypt_tag }}"
+letsencrypt_webserver_image_full: "{{ letsencrypt_webserver_image }}:{{ letsencrypt_webserver_tag }}"
+
+letsencrypt_lego_dimensions: "{{ default_container_dimensions }}"
+letsencrypt_webserver_dimensions: "{{ default_container_dimensions }}"
+
+letsencrypt_lego_default_volumes:
+ - "{{ node_config_directory }}/letsencrypt-lego/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "letsencrypt:/etc/letsencrypt"
+ - "kolla_logs:/var/log/kolla/"
+letsencrypt_lego_extra_volumes: "{{ default_extra_volumes }}"
+
+letsencrypt_webserver_default_volumes:
+ - "{{ node_config_directory }}/letsencrypt-webserver/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "letsencrypt:/etc/letsencrypt"
+ - "kolla_logs:/var/log/kolla/"
+letsencrypt_webserver_extra_volumes: "{{ default_extra_volumes }}"
+
+letsencrypt_cert_server: "https://acme-v02.api.letsencrypt.org/directory"
+# attempt to renew Let's Encrypt certificate every 4 hours
+letsencrypt_cron_renew_schedule: "0 */4 * * *"
+# The email used for certificate registration and recovery contact. Required.
+letsencrypt_email: ""
+letsencrypt_cert_valid_days: "30"
+
+letsencrypt_external_fqdns:
+ - "{{ kolla_external_fqdn }}"
+letsencrypt_internal_fqdns:
+ - "{{ kolla_internal_fqdn }}"
diff --git a/ansible/roles/letsencrypt/handlers/main.yml b/ansible/roles/letsencrypt/handlers/main.yml
new file mode 100644
index 0000000000..9e2610c8ce
--- /dev/null
+++ b/ansible/roles/letsencrypt/handlers/main.yml
@@ -0,0 +1,34 @@
+---
+- name: Restart letsencrypt-webserver container
+ vars:
+ service_name: "letsencrypt-webserver"
+ service: "{{ letsencrypt_services[service_name] }}"
+ become: true
+ kolla_docker:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ environment: "{{ service.environment | default(omit) }}"
+ when:
+ - kolla_action != "config"
+
+- name: Restart letsencrypt-lego container
+ vars:
+ service_name: "letsencrypt-lego"
+ service: "{{ letsencrypt_services[service_name] }}"
+ become: true
+ kolla_docker:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ environment: "{{ service.environment | default(omit) }}"
+ when:
+ - kolla_action != "config"
diff --git a/ansible/roles/letsencrypt/tasks/check-containers.yml b/ansible/roles/letsencrypt/tasks/check-containers.yml
new file mode 100644
index 0000000000..6ca67c510c
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/check-containers.yml
@@ -0,0 +1,18 @@
+---
+- name: Check LetsEncrypt containers
+ become: true
+ kolla_docker:
+ action: "compare_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ item.value.container_name }}"
+ image: "{{ item.value.image }}"
+ volumes: "{{ item.value.volumes }}"
+ dimensions: "{{ item.value.dimensions }}"
+ healthcheck: "{{ item.value.healthcheck | default(omit) }}"
+ environment: "{{ item.value.environment | default(omit) }}"
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ letsencrypt_services }}"
+ notify:
+ - "Restart {{ item.key }} container"
diff --git a/ansible/roles/letsencrypt/tasks/config.yml b/ansible/roles/letsencrypt/tasks/config.yml
new file mode 100644
index 0000000000..44011ee7eb
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/config.yml
@@ -0,0 +1,66 @@
+---
+- name: Ensuring config directories exist
+ file:
+ path: "{{ node_config_directory }}/{{ item.key }}"
+ state: "directory"
+ owner: "{{ config_owner_user }}"
+ group: "{{ config_owner_group }}"
+ mode: "0770"
+ become: true
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ letsencrypt_services }}"
+
+- name: Copying over config.json files for services
+ template:
+ src: "{{ item.key }}.json.j2"
+ dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
+ mode: "0660"
+ become: true
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ letsencrypt_services }}"
+ notify:
+ - "Restart {{ item.key }} container"
+
+- name: Copying over letsencrypt-webserver.conf
+ vars:
+ service: "{{ letsencrypt_services['letsencrypt-webserver'] }}"
+ become: true
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/letsencrypt-webserver/letsencrypt-webserver.conf"
+ mode: "0660"
+ with_first_found:
+ - "{{ node_custom_config }}/letsencrypt/{{ inventory_hostname }}/letsencrypt-webserver.conf"
+ - "{{ node_custom_config }}/letsencrypt/letsencrypt-webserver.conf"
+ - "letsencrypt-webserver.conf.j2"
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+ notify:
+ - Restart letsencrypt-webserver container
+
+- name: Copying files for letsencrypt-lego
+ vars:
+ service: "{{ letsencrypt_services['letsencrypt-lego'] }}"
+ template:
+ src: "{{ item.src }}"
+ dest: "{{ node_config_directory }}/letsencrypt-lego/{{ item.dest }}"
+ mode: "0660"
+ become: true
+ with_items:
+ - { src: "crontab.j2", dest: "crontab" }
+ - { src: "id_rsa.j2", dest: "id_rsa" }
+ - { src: "letsencrypt-lego-run.sh.j2", dest: "letsencrypt-lego-run.sh" }
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+ notify:
+ - Restart letsencrypt-lego container
+
+- include_tasks: copy-certs.yml
+ when:
+ - kolla_copy_ca_into_containers | bool
diff --git a/ansible/roles/letsencrypt/tasks/config_validate.yml b/ansible/roles/letsencrypt/tasks/config_validate.yml
new file mode 100644
index 0000000000..ed97d539c0
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/config_validate.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible/roles/letsencrypt/tasks/copy-certs.yml b/ansible/roles/letsencrypt/tasks/copy-certs.yml
new file mode 100644
index 0000000000..567b23612e
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/copy-certs.yml
@@ -0,0 +1,6 @@
+---
+- name: "Copy certificates and keys for {{ project_name }}"
+ import_role:
+ role: service-cert-copy
+ vars:
+ project_services: "{{ letsencrypt_services }}"
diff --git a/ansible/roles/letsencrypt/tasks/deploy-containers.yml b/ansible/roles/letsencrypt/tasks/deploy-containers.yml
new file mode 100644
index 0000000000..eb24ab5c7a
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/deploy-containers.yml
@@ -0,0 +1,2 @@
+---
+- import_tasks: check-containers.yml
diff --git a/ansible/roles/letsencrypt/tasks/deploy.yml b/ansible/roles/letsencrypt/tasks/deploy.yml
new file mode 100644
index 0000000000..49edff81e3
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/deploy.yml
@@ -0,0 +1,7 @@
+---
+- import_tasks: config.yml
+
+- import_tasks: check-containers.yml
+
+- name: Flush handlers
+ meta: flush_handlers
diff --git a/ansible/roles/letsencrypt/tasks/loadbalancer.yml b/ansible/roles/letsencrypt/tasks/loadbalancer.yml
new file mode 100644
index 0000000000..a9a2a5c4bc
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/loadbalancer.yml
@@ -0,0 +1,7 @@
+---
+- name: "Configure loadbalancer for {{ project_name }}"
+ import_role:
+ name: loadbalancer-config
+ vars:
+ project_services: "{{ letsencrypt_services }}"
+ tags: always
diff --git a/ansible/roles/letsencrypt/tasks/main.yml b/ansible/roles/letsencrypt/tasks/main.yml
new file mode 100644
index 0000000000..bc5d1e6257
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/letsencrypt/tasks/precheck.yml b/ansible/roles/letsencrypt/tasks/precheck.yml
new file mode 100644
index 0000000000..6ad18cb535
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/precheck.yml
@@ -0,0 +1,33 @@
+---
+- name: Get container facts
+ become: true
+ kolla_container_facts:
+ container_engine: "{{ kolla_container_engine }}"
+ name:
+ - letsencrypt_webserver
+ register: container_facts
+
+- name: Checking free port for LetsEncrypt server
+ vars:
+ service: "{{ letsencrypt_services['letsencrypt-webserver'] }}"
+ wait_for:
+ host: "{{ api_interface_address }}"
+ port: "{{ letsencrypt_webserver_port }}"
+ connect_timeout: 1
+ timeout: 1
+ state: stopped
+ when:
+ - container_facts['letsencrypt_webserver'] is not defined
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+
+- name: Validating letsencrypt email variable
+ run_once: true
+ vars:
+ replace: "valid"
+ assert:
+ that: letsencrypt_email | regex_replace('.*@.*$', replace) == "valid"
+ fail_msg: "Letsencrypt contact email value didn't pass validation."
+ when:
+ - enable_letsencrypt | bool
+ - kolla_enable_tls_external | bool
diff --git a/ansible/roles/letsencrypt/tasks/pull.yml b/ansible/roles/letsencrypt/tasks/pull.yml
new file mode 100644
index 0000000000..03283078d6
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/pull.yml
@@ -0,0 +1,11 @@
+---
+- name: Pulling LetsEncrypt images
+ become: true
+ kolla_docker:
+ action: "pull_image"
+ common_options: "{{ docker_common_options }}"
+ image: "{{ item.value.image }}"
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ with_dict: "{{ letsencrypt_services }}"
diff --git a/ansible/roles/letsencrypt/tasks/reconfigure.yml b/ansible/roles/letsencrypt/tasks/reconfigure.yml
new file mode 100644
index 0000000000..5b10a7e111
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/reconfigure.yml
@@ -0,0 +1,2 @@
+---
+- import_tasks: deploy.yml
diff --git a/ansible/roles/letsencrypt/tasks/stop.yml b/ansible/roles/letsencrypt/tasks/stop.yml
new file mode 100644
index 0000000000..9fbda55f16
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/stop.yml
@@ -0,0 +1,6 @@
+---
+- import_role:
+ role: service-stop
+ vars:
+ project_services: "{{ letsencrypt_services }}"
+ service_name: "{{ project_name }}"
diff --git a/ansible/roles/letsencrypt/tasks/upgrade.yml b/ansible/roles/letsencrypt/tasks/upgrade.yml
new file mode 100644
index 0000000000..5b10a7e111
--- /dev/null
+++ b/ansible/roles/letsencrypt/tasks/upgrade.yml
@@ -0,0 +1,2 @@
+---
+- import_tasks: deploy.yml
diff --git a/ansible/roles/letsencrypt/templates/crontab.j2 b/ansible/roles/letsencrypt/templates/crontab.j2
new file mode 100644
index 0000000000..83209f5b46
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/crontab.j2
@@ -0,0 +1,8 @@
+PATH=/usr/local/bin:/usr/bin:/bin
+
+{% if kolla_external_vip_address != kolla_internal_vip_address and kolla_external_fqdn != kolla_external_vip_address %}
+{{ letsencrypt_cron_renew_schedule }} /usr/bin/letsencrypt-certificates --external --fqdns {% for fqdn in letsencrypt_external_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
+{% if kolla_external_vip_address == kolla_internal_vip_address and kolla_internal_fqdn != kolla_internal_vip_address %}
+{{ letsencrypt_cron_renew_schedule }} /usr/bin/letsencrypt-certificates --internal --fqdns {% for fqdn in letsencrypt_internal_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
diff --git a/ansible/roles/letsencrypt/templates/id_rsa.j2 b/ansible/roles/letsencrypt/templates/id_rsa.j2
new file mode 100644
index 0000000000..9e42a132a3
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/id_rsa.j2
@@ -0,0 +1 @@
+{{ haproxy_ssh_key.private_key }}
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-lego-run.sh.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-lego-run.sh.j2
new file mode 100644
index 0000000000..3f1282f80c
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-lego-run.sh.j2
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
+
+{% if kolla_external_vip_address != kolla_internal_vip_address and kolla_external_fqdn != kolla_external_vip_address %}
+/usr/bin/letsencrypt-certificates --external --fqdns {% for fqdn in letsencrypt_external_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
+{% if kolla_external_vip_address == kolla_internal_vip_address and kolla_internal_fqdn != kolla_internal_vip_address %}
+/usr/bin/letsencrypt-certificates --internal --fqdns {% for fqdn in letsencrypt_internal_fqdns %}{{ fqdn }}{% if not loop.last %},{% endif %}{% endfor %} --days {{ letsencrypt_cert_valid_days }} --port {{ letsencrypt_webserver_port }} --mail {{ letsencrypt_email }} --acme {{ letsencrypt_cert_server }} --vips {% if not kolla_same_external_internal_vip %}{{ kolla_external_vip_address }},{% endif %}{{ kolla_internal_vip_address }} --haproxies-ssh {% for host in groups['loadbalancer'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ haproxy_ssh_port }}{% if not loop.last %},{% endif %}{% endfor %} 2>&1 | tee -a /var/log/kolla/letsencrypt/letsencrypt-lego.log
+{% endif %}
+
+{{ cron_cmd }}
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-lego.json.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-lego.json.j2
new file mode 100644
index 0000000000..174f20bdad
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-lego.json.j2
@@ -0,0 +1,26 @@
+{% set cron_cmd = 'cron -f' if kolla_base_distro in ['ubuntu', 'debian'] else 'crond -s -n' %}
+{% set cron_path = '/var/spool/cron/crontabs/root' if kolla_base_distro in ['ubuntu', 'debian'] else '/var/spool/cron/root' %}
+{
+ "command": "/usr/local/bin/letsencrypt-lego-run.sh",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/letsencrypt-lego-run.sh",
+ "dest": "/usr/local/bin/letsencrypt-lego-run.sh",
+ "owner": "root",
+ "perm": "0700"
+ },
+ {
+ "source": "{{ container_config_directory }}/crontab",
+ "dest": "{{ cron_path }}",
+ "owner": "root",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/id_rsa",
+ "dest": "/var/lib/letsencrypt/.ssh/id_rsa",
+ "owner": "letsencrypt",
+ "perm": "0600"
+ }
+ ]
+}
+
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-webserver.conf.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.conf.j2
new file mode 100644
index 0000000000..f2555caad9
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.conf.j2
@@ -0,0 +1,19 @@
+Listen {{ api_interface_address }}:{{ letsencrypt_webserver_port }}
+
+ServerSignature Off
+ServerTokens Prod
+TraceEnable off
+KeepAliveTimeout 60
+
+<VirtualHost {{ api_interface_address }}:{{ letsencrypt_webserver_port }}>
+    DocumentRoot /etc/letsencrypt/http-01
+    ErrorLog "/var/log/kolla/letsencrypt/letsencrypt-webserver-error.log"
+    CustomLog "/var/log/kolla/letsencrypt/letsencrypt-webserver-access.log" common
+
+    <Directory /etc/letsencrypt/http-01>
+        Options None
+        AllowOverride None
+        Require all granted
+    </Directory>
+</VirtualHost>
+
diff --git a/ansible/roles/letsencrypt/templates/letsencrypt-webserver.json.j2 b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.json.j2
new file mode 100644
index 0000000000..5284241643
--- /dev/null
+++ b/ansible/roles/letsencrypt/templates/letsencrypt-webserver.json.j2
@@ -0,0 +1,14 @@
+{% set letsencrypt_apache_dir = 'apache2/conf-enabled' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd/conf.d' %}
+{% set apache_binary = 'apache2' if kolla_base_distro in ['ubuntu', 'debian'] else 'httpd' %}
+
+{
+ "command": "/usr/sbin/{{ apache_binary }} -DFOREGROUND",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/letsencrypt-webserver.conf",
+ "dest": "/etc/{{ letsencrypt_apache_dir }}/letsencrypt-webserver.conf",
+ "owner": "letsencrypt",
+ "perm": "0600"
+ }
+ ]
+}
diff --git a/ansible/roles/letsencrypt/vars/main.yml b/ansible/roles/letsencrypt/vars/main.yml
new file mode 100644
index 0000000000..66b02925f9
--- /dev/null
+++ b/ansible/roles/letsencrypt/vars/main.yml
@@ -0,0 +1,2 @@
+---
+project_name: "letsencrypt"
diff --git a/ansible/roles/loadbalancer/defaults/main.yml b/ansible/roles/loadbalancer/defaults/main.yml
index 188cd7d5e8..9bf4e4136e 100644
--- a/ansible/roles/loadbalancer/defaults/main.yml
+++ b/ansible/roles/loadbalancer/defaults/main.yml
@@ -26,6 +26,14 @@ loadbalancer_services:
privileged: True
volumes: "{{ keepalived_default_volumes + keepalived_extra_volumes }}"
dimensions: "{{ keepalived_dimensions }}"
+ haproxy-ssh:
+ container_name: "haproxy_ssh"
+ group: loadbalancer
+ enabled: "{{ enable_letsencrypt | bool }}"
+ image: "{{ haproxy_ssh_image_full }}"
+ volumes: "{{ haproxy_ssh_default_volumes }}"
+ dimensions: "{{ haproxy_ssh_dimensions }}"
+ healthcheck: "{{ haproxy_ssh_healthcheck }}"
####################
@@ -43,6 +51,10 @@ proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker
proxysql_tag: "{{ openstack_tag }}"
proxysql_image_full: "{{ proxysql_image }}:{{ proxysql_tag }}"
+haproxy_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/haproxy-ssh"
+haproxy_ssh_tag: "{{ haproxy_tag }}"
+haproxy_ssh_image_full: "{{ haproxy_ssh_image }}:{{ haproxy_ssh_tag }}"
+
syslog_server: "{{ api_interface_address }}"
syslog_haproxy_facility: "local1"
@@ -59,6 +71,7 @@ haproxy_defaults_max_connections: 10000
haproxy_dimensions: "{{ default_container_dimensions }}"
proxysql_dimensions: "{{ default_container_dimensions }}"
keepalived_dimensions: "{{ default_container_dimensions }}"
+haproxy_ssh_dimensions: "{{ default_container_dimensions }}"
haproxy_enable_healthchecks: "{{ enable_container_healthchecks }}"
haproxy_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
@@ -86,11 +99,27 @@ proxysql_healthcheck:
test: "{% if proxysql_enable_healthchecks | bool %}{{ proxysql_healthcheck_test }}{% else %}NONE{% endif %}"
timeout: "{{ proxysql_healthcheck_timeout }}"
+haproxy_ssh_enable_healthchecks: "{{ enable_container_healthchecks }}"
+haproxy_ssh_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
+haproxy_ssh_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
+haproxy_ssh_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
+haproxy_ssh_healthcheck_test: ["CMD-SHELL", "healthcheck_listen sshd {{ haproxy_ssh_port }}"]
+haproxy_ssh_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
+haproxy_ssh_healthcheck:
+ interval: "{{ haproxy_ssh_healthcheck_interval }}"
+ retries: "{{ haproxy_ssh_healthcheck_retries }}"
+ start_period: "{{ haproxy_ssh_healthcheck_start_period }}"
+ test: "{% if haproxy_ssh_enable_healthchecks | bool %}{{ haproxy_ssh_healthcheck_test }}{% else %}NONE{% endif %}"
+ timeout: "{{ haproxy_ssh_healthcheck_timeout }}"
+
+
haproxy_default_volumes:
- "{{ node_config_directory }}/haproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
- "haproxy_socket:/var/lib/kolla/haproxy/"
+ - "letsencrypt_certificates:/etc/haproxy/certificates"
+
proxysql_default_volumes:
- "{{ node_config_directory }}/proxysql/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
@@ -105,6 +134,13 @@ keepalived_default_volumes:
- "/lib/modules:/lib/modules:ro"
- "{{ 'haproxy_socket:/var/lib/kolla/haproxy/' if enable_haproxy | bool else '' }}"
- "{{ 'proxysql_socket:/var/lib/kolla/proxysql/' if enable_proxysql | bool else '' }}"
+haproxy_ssh_default_volumes:
+ - "{{ node_config_directory }}/haproxy-ssh/:{{ container_config_directory }}/:ro"
+ - "/etc/localtime:/etc/localtime:ro"
+ - "{{ '/etc/timezone:/etc/timezone:ro' if ansible_facts.os_family == 'Debian' else '' }}"
+ - "haproxy_socket:/var/lib/kolla/haproxy/"
+ - "{{ 'letsencrypt:/etc/letsencrypt' if enable_letsencrypt | bool else omit }}"
+ - "{{ 'letsencrypt_certificates:/etc/haproxy/certificates' if enable_letsencrypt | bool else omit }}"
haproxy_extra_volumes: "{{ default_extra_volumes }}"
proxysql_extra_volumes: "{{ default_extra_volumes }}"
@@ -143,10 +179,21 @@ haproxy_defaults_balance: "roundrobin"
haproxy_host_ipv4_tcp_retries2: "KOLLA_UNSET"
# HAProxy socket admin permissions enable
-haproxy_socket_level_admin: "no"
-
+haproxy_socket_level_admin: "{{ enable_letsencrypt | bool }}"
kolla_externally_managed_cert: False
# Allow to disable keepalived tracking script (e.g. for single node environments
# where this proves problematic in some cases)
keepalived_track_script_enabled: True
+
+# Default backend for single external frontend (for missing mappings)
+haproxy_external_single_frontend_default_backend: "horizon_external_back"
+
+haproxy_external_single_frontend_public_port: "443"
+
+haproxy_external_single_frontend_options:
+ - option httplog
+ - option forwardfor
+ - "timeout client {{ haproxy_glance_api_client_timeout }}"
+
+haproxy_glance_api_client_timeout: "6h"
diff --git a/ansible/roles/loadbalancer/handlers/main.yml b/ansible/roles/loadbalancer/handlers/main.yml
index ae0b25b3af..00cb3480ee 100644
--- a/ansible/roles/loadbalancer/handlers/main.yml
+++ b/ansible/roles/loadbalancer/handlers/main.yml
@@ -327,3 +327,19 @@
- service.enabled | bool
listen:
- Wait for virtual IP to appear
+
+- name: Restart haproxy-ssh container
+ vars:
+ service_name: "haproxy-ssh"
+ service: "{{ loadbalancer_services[service_name] }}"
+ become: true
+ kolla_docker:
+ action: "recreate_or_restart_container"
+ common_options: "{{ docker_common_options }}"
+ name: "{{ service.container_name }}"
+ image: "{{ service.image }}"
+ volumes: "{{ service.volumes | reject('equalto', '') | list }}"
+ dimensions: "{{ service.dimensions }}"
+ healthcheck: "{{ service.healthcheck | default(omit) }}"
+ when:
+ - kolla_action != "config"
diff --git a/ansible/roles/loadbalancer/tasks/config.yml b/ansible/roles/loadbalancer/tasks/config.yml
index 964ba194e5..c2270cbee0 100644
--- a/ansible/roles/loadbalancer/tasks/config.yml
+++ b/ansible/roles/loadbalancer/tasks/config.yml
@@ -80,8 +80,10 @@
become: true
with_dict: "{{ loadbalancer_services }}"
when:
+ - keepalived_track_script_enabled | bool
- inventory_hostname in groups[service.group]
- item.key != 'keepalived'
+ - item.key != 'haproxy-ssh'
- not item.value.enabled | bool
or not inventory_hostname in groups[item.value.group]
- service.enabled | bool
@@ -102,6 +104,7 @@
- inventory_hostname in groups[service.group]
- inventory_hostname in groups[item.value.group]
- item.key != 'keepalived'
+ - item.key != 'haproxy-ssh'
- item.value.enabled | bool
- service.enabled | bool
notify:
@@ -156,6 +159,21 @@
notify:
- Restart proxysql container
+- name: Copying over haproxy single external frontend config
+ vars:
+ service: "{{ loadbalancer_services['haproxy'] }}"
+ template:
+ src: "haproxy/haproxy_external_frontend.cfg.j2"
+ dest: "{{ node_config_directory }}/haproxy/services.d/external-frontend.cfg"
+ mode: "0660"
+ become: true
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+ - haproxy_single_external_frontend | bool
+ notify:
+ - Restart haproxy container
+
- name: Copying over custom haproxy services configuration
vars:
service: "{{ loadbalancer_services['haproxy'] }}"
@@ -199,6 +217,7 @@
mode: "0660"
become: true
when:
+ - not enable_letsencrypt | bool
- kolla_enable_tls_external | bool
- not kolla_externally_managed_cert | bool
- inventory_hostname in groups[service.group]
@@ -217,6 +236,7 @@
mode: "0660"
become: true
when:
+ - not enable_letsencrypt | bool
- kolla_enable_tls_internal | bool
- not kolla_externally_managed_cert | bool
- inventory_hostname in groups[service.group]
@@ -265,3 +285,20 @@
- "proxysql/proxysql_run.sh.j2"
notify:
- Restart proxysql container
+
+- name: Copying files for haproxy-ssh
+ vars:
+ haproxy_ssh: "{{ loadbalancer_services['haproxy-ssh'] }}"
+ template:
+ src: "{{ item.src }}"
+ dest: "{{ node_config_directory }}/haproxy-ssh/{{ item.dest }}"
+ mode: "0600"
+ become: true
+ with_items:
+ - { src: "haproxy-ssh/sshd_config.j2", dest: "sshd_config" }
+ - { src: "haproxy-ssh/id_rsa.pub", dest: "id_rsa.pub" }
+ when:
+ - inventory_hostname in groups[haproxy_ssh.group]
+ - haproxy_ssh.enabled | bool
+ notify:
+ - Restart haproxy-ssh container
diff --git a/ansible/roles/loadbalancer/tasks/precheck.yml b/ansible/roles/loadbalancer/tasks/precheck.yml
index 56ecf0690e..a6398ddc24 100644
--- a/ansible/roles/loadbalancer/tasks/precheck.yml
+++ b/ansible/roles/loadbalancer/tasks/precheck.yml
@@ -63,6 +63,7 @@
changed_when: false
when:
- not kolla_externally_managed_cert | bool
+ - not enable_letsencrypt | bool
- kolla_enable_tls_external | bool
- name: Assert that external haproxy certificate exists
@@ -72,6 +73,7 @@
fail_msg: "External haproxy certificate file is not found. It is configured via 'kolla_external_fqdn_cert'"
when:
- not kolla_externally_managed_cert | bool
+ - not enable_letsencrypt | bool
- kolla_enable_tls_external | bool
- name: Checking if internal haproxy certificate exists
@@ -83,6 +85,7 @@
changed_when: false
when:
- not kolla_externally_managed_cert | bool
+ - not enable_letsencrypt | bool
- kolla_enable_tls_internal | bool
- name: Assert that internal haproxy certificate exists
@@ -92,6 +95,7 @@
fail_msg: "Internal haproxy certificate file is not found. It is configured via 'kolla_internal_fqdn_cert'"
when:
- not kolla_externally_managed_cert | bool
+ - not enable_letsencrypt | bool
- kolla_enable_tls_internal | bool
- name: Checking the kolla_external_vip_interface is present
diff --git a/ansible/roles/loadbalancer/templates/haproxy-ssh/haproxy-ssh.json.j2 b/ansible/roles/loadbalancer/templates/haproxy-ssh/haproxy-ssh.json.j2
new file mode 100644
index 0000000000..418be139d7
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy-ssh/haproxy-ssh.json.j2
@@ -0,0 +1,17 @@
+{
+ "command": "/usr/sbin/sshd -D",
+ "config_files": [
+ {
+ "source": "{{ container_config_directory }}/sshd_config",
+ "dest": "/etc/ssh/sshd_config",
+ "owner": "root",
+ "perm": "0600"
+ },
+ {
+ "source": "{{ container_config_directory }}/id_rsa.pub",
+ "dest": "/var/lib/haproxy/.ssh/authorized_keys",
+ "owner": "haproxy",
+ "perm": "0600"
+ }
+ ]
+}
diff --git a/ansible/roles/loadbalancer/templates/haproxy-ssh/id_rsa.pub b/ansible/roles/loadbalancer/templates/haproxy-ssh/id_rsa.pub
new file mode 100644
index 0000000000..e7b2ce1c99
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy-ssh/id_rsa.pub
@@ -0,0 +1 @@
+{{ haproxy_ssh_key.public_key }}
diff --git a/ansible/roles/loadbalancer/templates/haproxy-ssh/sshd_config.j2 b/ansible/roles/loadbalancer/templates/haproxy-ssh/sshd_config.j2
new file mode 100644
index 0000000000..287fd195a9
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy-ssh/sshd_config.j2
@@ -0,0 +1,5 @@
+Port {{ haproxy_ssh_port }}
+ListenAddress {{ api_interface_address }}
+
+SyslogFacility AUTHPRIV
+UsePAM yes
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2
index a51a8ed7ab..adc7e343a8 100644
--- a/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy.json.j2
@@ -18,20 +18,29 @@
"dest": "/etc/haproxy/services.d",
"owner": "root",
"perm": "0700"
+ }{% if kolla_enable_tls_external | bool and not enable_letsencrypt | bool %},
+ {
+ "source": "{{ container_config_directory }}/external-frontend-map",
+ "dest": "/etc/haproxy/external-frontend-map",
+ "owner": "root",
+ "perm": "0600",
+ "optional": {{ (not haproxy_single_external_frontend | bool) | string | lower }}
},
{
"source": "{{ container_config_directory }}/haproxy.pem",
- "dest": "/etc/haproxy/haproxy.pem",
- "owner": "root",
+ "dest": "/etc/haproxy/certificates/haproxy.pem",
+ "owner": "haproxy",
"perm": "0600",
"optional": {{ (not kolla_enable_tls_external | bool) | string | lower }}
- },
+ }{% endif %}
+ {% if kolla_enable_tls_internal | bool and not enable_letsencrypt | bool %},
{
"source": "{{ container_config_directory }}/haproxy-internal.pem",
- "dest": "/etc/haproxy/haproxy-internal.pem",
- "owner": "root",
+ "dest": "/etc/haproxy/certificates/haproxy-internal.pem",
+ "owner": "haproxy",
"perm": "0600",
"optional": {{ (not kolla_enable_tls_internal | bool) | string | lower }}
}
+ {% endif %}
]
}
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2
new file mode 100644
index 0000000000..87922259e2
--- /dev/null
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_external_frontend.cfg.j2
@@ -0,0 +1,11 @@
+{%- set external_tls_bind_info = 'ssl crt /etc/haproxy/certificates/haproxy.pem' if kolla_enable_tls_external|bool else '' %}
+
+frontend external_frontend
+ mode http
+ http-request del-header X-Forwarded-Proto
+{% for http_option in haproxy_external_single_frontend_options %}
+ {{ http_option }}
+{% endfor %}
+ http-request set-header X-Forwarded-Proto https if { ssl_fc }
+ bind {{ kolla_external_vip_address }}:{{ haproxy_external_single_frontend_public_port }} {{ external_tls_bind_info }}
+ use_backend %[req.hdr(host),lower,map_dom(/etc/haproxy/external-frontend-map,{{ haproxy_external_single_frontend_default_backend }})]
diff --git a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2 b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
index 91cf78f4a9..692b39e0e3 100644
--- a/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
+++ b/ansible/roles/loadbalancer/templates/haproxy/haproxy_run.sh.j2
@@ -1,9 +1,40 @@
#!/bin/bash -x
-# We need to run haproxy with one `-f` for each service, because including an
-# entire config directory was not a feature until version 1.7 of HAProxy.
-# So, append "-f $cfg" to the haproxy command for each service file.
-# This will run haproxy_cmd *exactly once*.
+{% if kolla_enable_tls_internal | bool or kolla_enable_tls_external | bool %}
+{% if kolla_enable_tls_external | bool %}
+if [ ! -e "/etc/haproxy/certificates/haproxy.pem" ]; then
+ # Generate temporary self-signed cert
+ # This means external tls is enabled but the certificate was not copied
+ # to the container - so letsencrypt is enabled
+ #
+ # Let's generate certificate to make haproxy happy, lego will
+ # replace it in a while
+ ssl_tmp_dir=$(mktemp -d)
+ openssl req -x509 -newkey rsa:2048 -sha256 -days 1 -nodes -keyout ${ssl_tmp_dir}/haproxy$$.key -out ${ssl_tmp_dir}/haproxy$$.crt -subj "/CN={{ kolla_external_fqdn }}"
+ cat ${ssl_tmp_dir}/haproxy$$.crt ${ssl_tmp_dir}/haproxy$$.key> /etc/haproxy/certificates/haproxy.pem
+ rm -rf ${ssl_tmp_dir}
+ chown haproxy:haproxy /etc/haproxy/certificates/haproxy.pem
+ chmod 0660 /etc/haproxy/certificates/haproxy.pem
+fi
+{% endif %}
+{% if kolla_enable_tls_internal | bool %}
+if [ ! -e "/etc/haproxy/certificates/haproxy-internal.pem" ]; then
+ # Generate temporary self-signed cert
+ # This means internal tls is enabled but the certificate was not copied
+ # to the container - so letsencrypt is enabled
+ #
+ # Let's generate certificate to make haproxy happy, lego will
+ # replace it in a while
+ ssl_tmp_dir=$(mktemp -d)
+ openssl req -x509 -newkey rsa:2048 -sha256 -days 1 -nodes -keyout ${ssl_tmp_dir}/haproxy-internal$$.key -out ${ssl_tmp_dir}/haproxy-internal$$.crt -subj "/CN={{ kolla_internal_fqdn }}"
+ cat ${ssl_tmp_dir}/haproxy-internal$$.crt ${ssl_tmp_dir}/haproxy-internal$$.key> /etc/haproxy/certificates/haproxy-internal.pem
+ rm -rf ${ssl_tmp_dir}
+ chown haproxy:haproxy /etc/haproxy/certificates/haproxy-internal.pem
+ chmod 0660 /etc/haproxy/certificates/haproxy-internal.pem
+fi
+{% endif %}
+{% endif %}
+
find /etc/haproxy/services.d/ -mindepth 1 -print0 | \
xargs -0 -Icfg echo -f cfg | \
xargs /usr/sbin/haproxy -W -db -p /run/haproxy.pid -f /etc/haproxy/haproxy.cfg
diff --git a/ansible/roles/magnum/defaults/main.yml b/ansible/roles/magnum/defaults/main.yml
index c929842e5b..394f03ae28 100644
--- a/ansible/roles/magnum/defaults/main.yml
+++ b/ansible/roles/magnum/defaults/main.yml
@@ -16,11 +16,14 @@ magnum_services:
mode: "http"
external: false
port: "{{ magnum_api_port }}"
+ listen_port: "{{ magnum_api_listen_port }}"
magnum_api_external:
enabled: "{{ enable_magnum }}"
mode: "http"
external: true
- port: "{{ magnum_api_port }}"
+ external_fqdn: "{{ magnum_external_fqdn }}"
+ port: "{{ magnum_api_public_port }}"
+ listen_port: "{{ magnum_api_listen_port }}"
magnum-conductor:
container_name: magnum_conductor
group: magnum-conductor
@@ -133,8 +136,8 @@ magnum_conductor_container_proxy: "{{ container_proxy }}"
####################
# OpenStack
####################
-magnum_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ magnum_api_port }}/v1"
-magnum_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ magnum_api_port }}/v1"
+magnum_internal_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port, '/v1') }}"
+magnum_public_endpoint: "{{ magnum_external_fqdn | kolla_url(public_protocol, magnum_api_public_port, '/v1') }}"
magnum_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/magnum/tasks/config.yml b/ansible/roles/magnum/tasks/config.yml
index c299631058..2af33760a6 100644
--- a/ansible/roles/magnum/tasks/config.yml
+++ b/ansible/roles/magnum/tasks/config.yml
@@ -31,6 +31,37 @@
when:
- magnum_policy.results
+- name: Check if kubeconfig file is supplied
+ stat:
+ path: "{{ node_custom_config }}/magnum/kubeconfig"
+ delegate_to: localhost
+ run_once: True
+ register: magnum_kubeconfig_file
+
+- name: Copying over kubeconfig file
+ template:
+ src: "{{ node_custom_config }}/magnum/kubeconfig"
+ dest: "{{ node_config_directory }}/{{ item.key }}/kubeconfig"
+ mode: "0660"
+ become: true
+ when:
+ - inventory_hostname in groups[item.value.group]
+ - item.value.enabled | bool
+ - magnum_kubeconfig_file.stat.exists
+ with_dict: "{{ magnum_services }}"
+ notify:
+ - Restart {{ item.key }} container
+
+- name: Set magnum kubeconfig file's path
+ set_fact:
+ magnum_kubeconfig_file_path: "{{ magnum_kubeconfig_file.stat.path }}"
+ when:
+ - magnum_kubeconfig_file.stat.exists
+
+- name: Enable magnum cluster-api helm driver if kubeconfig exists
+ set_fact:
+ magnum_capi_helm_driver_enabled: "{{ magnum_kubeconfig_file.stat.exists | bool }}"
+
- include_tasks: copy-certs.yml
when:
- kolla_copy_ca_into_containers | bool
diff --git a/ansible/roles/magnum/templates/magnum-api.json.j2 b/ansible/roles/magnum/templates/magnum-api.json.j2
index 9737ad8072..f3b172b812 100644
--- a/ansible/roles/magnum/templates/magnum-api.json.j2
+++ b/ansible/roles/magnum/templates/magnum-api.json.j2
@@ -6,7 +6,14 @@
"dest": "/etc/magnum/magnum.conf",
"owner": "magnum",
"perm": "0600"
- }{% if magnum_policy_file is defined %},
+ }{% if magnum_kubeconfig_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/kubeconfig",
+ "dest": "/var/lib/magnum/.kube/config",
+ "owner": "magnum",
+ "perm": "0600"
+ }{% endif %}
+ {% if magnum_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ magnum_policy_file }}",
"dest": "/etc/magnum/{{ magnum_policy_file }}",
diff --git a/ansible/roles/magnum/templates/magnum-conductor.json.j2 b/ansible/roles/magnum/templates/magnum-conductor.json.j2
index f77b1609d1..13a3f2062f 100644
--- a/ansible/roles/magnum/templates/magnum-conductor.json.j2
+++ b/ansible/roles/magnum/templates/magnum-conductor.json.j2
@@ -6,7 +6,14 @@
"dest": "/etc/magnum/magnum.conf",
"owner": "magnum",
"perm": "0600"
- }{% if magnum_policy_file is defined %},
+ }{% if magnum_kubeconfig_file_path is defined %},
+ {
+ "source": "{{ container_config_directory }}/kubeconfig",
+ "dest": "/var/lib/magnum/.kube/config",
+ "owner": "magnum",
+ "perm": "0600"
+ }{% endif %}
+ {% if magnum_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ magnum_policy_file }}",
"dest": "/etc/magnum/{{ magnum_policy_file }}",
diff --git a/ansible/roles/magnum/templates/magnum.conf.j2 b/ansible/roles/magnum/templates/magnum.conf.j2
index 8422a8ce39..acf8b81243 100644
--- a/ansible/roles/magnum/templates/magnum.conf.j2
+++ b/ansible/roles/magnum/templates/magnum.conf.j2
@@ -151,3 +151,8 @@ trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}
+
+{% if magnum_kubeconfig_file_path is not defined %}
+[drivers]
+disabled_drivers = k8s_cluster_api_flatcar,k8s_cluster_api_ubuntu,k8s_cluster_api_ubuntu_focal
+{% endif %}
diff --git a/ansible/roles/manila/defaults/main.yml b/ansible/roles/manila/defaults/main.yml
index 8f5a13f1da..cd2173140d 100644
--- a/ansible/roles/manila/defaults/main.yml
+++ b/ansible/roles/manila/defaults/main.yml
@@ -14,11 +14,14 @@ manila_services:
mode: "http"
external: false
port: "{{ manila_api_port }}"
+ listen_port: "{{ manila_api_listen_port }}"
manila_api_external:
enabled: "{{ enable_manila }}"
mode: "http"
external: true
- port: "{{ manila_api_port }}"
+ external_fqdn: "{{ manila_external_fqdn }}"
+ port: "{{ manila_api_public_port }}"
+ listen_port: "{{ manila_api_listen_port }}"
manila-scheduler:
container_name: "manila_scheduler"
group: "manila-scheduler"
@@ -190,8 +193,8 @@ manila_data_extra_volumes: "{{ manila_extra_volumes }}"
#####################
## OpenStack
#####################
-manila_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ manila_api_port }}"
-manila_public_base_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ manila_api_port }}"
+manila_internal_base_endpoint: "{{ manila_internal_fqdn | kolla_url(internal_protocol, manila_api_port) }}"
+manila_public_base_endpoint: "{{ manila_external_fqdn | kolla_url(public_protocol, manila_api_public_port) }}"
manila_internal_endpoint: "{{ manila_internal_base_endpoint }}/v1/%(tenant_id)s"
manila_public_endpoint: "{{ manila_public_base_endpoint }}/v1/%(tenant_id)s"
diff --git a/ansible/roles/masakari/defaults/main.yml b/ansible/roles/masakari/defaults/main.yml
index e581e2ac46..6b2f072dfd 100644
--- a/ansible/roles/masakari/defaults/main.yml
+++ b/ansible/roles/masakari/defaults/main.yml
@@ -13,11 +13,14 @@ masakari_services:
mode: "http"
external: false
port: "{{ masakari_api_port }}"
+ listen_port: "{{ masakari_api_listen_port }}"
masakari_api_external:
enabled: "{{ enable_masakari }}"
mode: "http"
external: true
- port: "{{ masakari_api_port }}"
+ external_fqdn: "{{ masakari_external_fqdn }}"
+ port: "{{ masakari_api_public_port }}"
+ listen_port: "{{ masakari_api_listen_port }}"
masakari-engine:
container_name: masakari_engine
group: masakari-engine
@@ -130,8 +133,8 @@ masakari_hostmonitor_default_volumes:
####################
# OpenStack
####################
-masakari_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ masakari_api_port }}"
-masakari_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ masakari_api_port }}"
+masakari_internal_endpoint: "{{ masakari_internal_fqdn | kolla_url(internal_protocol, masakari_api_port) }}"
+masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol, masakari_api_public_port) }}"
masakari_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/mistral/defaults/main.yml b/ansible/roles/mistral/defaults/main.yml
index 3d1e2c64d1..137f5a3a9c 100644
--- a/ansible/roles/mistral/defaults/main.yml
+++ b/ansible/roles/mistral/defaults/main.yml
@@ -14,11 +14,14 @@ mistral_services:
mode: "http"
external: false
port: "{{ mistral_api_port }}"
+ listen_port: "{{ mistral_api_listen_port }}"
mistral_api_external:
enabled: "{{ enable_mistral }}"
mode: "http"
external: true
- port: "{{ mistral_api_port }}"
+ external_fqdn: "{{ mistral_external_fqdn }}"
+ port: "{{ mistral_api_public_port }}"
+ listen_port: "{{ mistral_api_listen_port }}"
mistral-engine:
container_name: mistral_engine
group: mistral-engine
@@ -184,10 +187,10 @@ mistral_api_extra_volumes: "{{ mistral_extra_volumes }}"
####################
# OpenStack
####################
-mistral_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ mistral_api_port }}"
+mistral_internal_base_endpoint: "{{ mistral_internal_fqdn | kolla_url(internal_protocol, mistral_api_port) }}"
mistral_internal_endpoint: "{{ mistral_internal_base_endpoint }}/v2"
-mistral_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ mistral_api_port }}/v2"
+mistral_public_endpoint: "{{ mistral_external_fqdn | kolla_url(public_protocol, mistral_api_public_port, '/v2') }}"
mistral_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/monasca/defaults/main.yml b/ansible/roles/monasca/defaults/main.yml
index 5946aa92b2..6d9723f277 100644
--- a/ansible/roles/monasca/defaults/main.yml
+++ b/ansible/roles/monasca/defaults/main.yml
@@ -13,11 +13,14 @@ monasca_services:
mode: "http"
external: false
port: "{{ monasca_api_port }}"
+ listen_port: "{{ monasca_api_listen_port }}"
monasca_api_external:
enabled: false
mode: "http"
external: true
- port: "{{ monasca_api_port }}"
+ external_fqdn: "{{ monasca_external_fqdn }}"
+ port: "{{ monasca_api_public_port }}"
+ listen_port: "{{ monasca_api_listen_port }}"
monasca-log-persister:
container_name: monasca_log_persister
group: monasca-log-persister
diff --git a/ansible/roles/murano/defaults/main.yml b/ansible/roles/murano/defaults/main.yml
index 473a3d189e..7446518e62 100644
--- a/ansible/roles/murano/defaults/main.yml
+++ b/ansible/roles/murano/defaults/main.yml
@@ -13,11 +13,14 @@ murano_services:
mode: "http"
external: false
port: "{{ murano_api_port }}"
+ listen_port: "{{ murano_api_listen_port }}"
murano_api_external:
enabled: "{{ enable_murano }}"
mode: "http"
external: true
- port: "{{ murano_api_port }}"
+ external_fqdn: "{{ murano_external_fqdn }}"
+ port: "{{ murano_api_public_port }}"
+ listen_port: "{{ murano_api_listen_port }}"
murano-engine:
container_name: murano_engine
group: murano-engine
@@ -90,8 +93,8 @@ murano_engine_extra_volumes: "{{ murano_extra_volumes }}"
####################
# OpenStack
####################
-murano_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ murano_api_port }}"
-murano_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ murano_api_port }}"
+murano_internal_endpoint: "{{ murano_internal_fqdn | kolla_url(internal_protocol, murano_api_port) }}"
+murano_public_endpoint: "{{ murano_external_fqdn | kolla_url(public_protocol, murano_api_public_port) }}"
murano_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/murano/templates/murano.conf.j2 b/ansible/roles/murano/templates/murano.conf.j2
index 8974a2bde1..fbebb12f9d 100644
--- a/ansible/roles/murano/templates/murano.conf.j2
+++ b/ansible/roles/murano/templates/murano.conf.j2
@@ -84,7 +84,7 @@ policy_file = {{ murano_policy_file }}
{% if service_name == 'murano-engine' %}
[rabbitmq]
-host = {{ kolla_external_fqdn }}
+host = {{ rabbitmq_external_fqdn }}
port = {{ outward_rabbitmq_port }}
login = {{ murano_agent_rabbitmq_user }}
password = {{ murano_agent_rabbitmq_password }}
diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml
index f4dc81c07b..be31603cbe 100644
--- a/ansible/roles/neutron/defaults/main.yml
+++ b/ansible/roles/neutron/defaults/main.yml
@@ -20,7 +20,8 @@ neutron_services:
enabled: "{{ enable_neutron | bool and not neutron_enable_tls_backend | bool }}"
mode: "http"
external: true
- port: "{{ neutron_server_port }}"
+ external_fqdn: "{{ neutron_external_fqdn }}"
+ port: "{{ neutron_server_public_port }}"
listen_port: "{{ neutron_server_listen_port }}"
neutron-openvswitch-agent:
container_name: "neutron_openvswitch_agent"
@@ -194,6 +195,7 @@ neutron_services:
enabled: "{{ enable_neutron | bool and neutron_enable_tls_backend | bool }}"
mode: "http"
external: true
+ external_fqdn: "{{ neutron_external_fqdn }}"
port: "{{ neutron_server_port }}"
listen_port: "{{ neutron_server_listen_port }}"
tls_backend: "yes"
diff --git a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2 b/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
index 1dbaae0ede..5b0ae990b8 100644
--- a/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
+++ b/ansible/roles/neutron/templates/linuxbridge_agent.ini.j2
@@ -5,7 +5,8 @@ extensions = {{ neutron_agent_extensions|map(attribute='name')|join(',') }}
[linux_bridge]
{% if inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool ) %}
-physical_interface_mappings = {% for interface in neutron_external_interface.split(',') %}physnet{{ loop.index0 + 1 }}:{{ interface }}{% if not loop.last %},{% endif %}{% endfor %}
+{# Format: physnet1:br1,physnet2:br2 #}
+physical_interface_mappings = {{ neutron_physical_networks.split(',') | zip(neutron_external_interface.split(',')) | map('join', ':') | join(',') }}
{% endif %}
[securitygroup]
diff --git a/ansible/roles/neutron/templates/ml2_conf.ini.j2 b/ansible/roles/neutron/templates/ml2_conf.ini.j2
index e55423e33c..592b57882a 100644
--- a/ansible/roles/neutron/templates/ml2_conf.ini.j2
+++ b/ansible/roles/neutron/templates/ml2_conf.ini.j2
@@ -15,7 +15,7 @@ extension_drivers = {{ neutron_extension_drivers | map(attribute='name') | join(
[ml2_type_vlan]
{% if enable_ironic | bool %}
-network_vlan_ranges = physnet1
+network_vlan_ranges = {{ neutron_physical_networks }}
{% else %}
network_vlan_ranges =
{% endif %}
@@ -24,7 +24,7 @@ network_vlan_ranges =
{% if enable_ironic | bool %}
flat_networks = *
{% else %}
-flat_networks = {% for interface in neutron_external_interface.split(',') %}physnet{{ loop.index0 + 1 }}{% if not loop.last %},{% endif %}{% endfor %}
+flat_networks = {{ neutron_physical_networks }}
{% endif %}
[ml2_type_vxlan]
diff --git a/ansible/roles/neutron/templates/openvswitch_agent.ini.j2 b/ansible/roles/neutron/templates/openvswitch_agent.ini.j2
index 88834e2dea..8ac25af7e1 100644
--- a/ansible/roles/neutron/templates/openvswitch_agent.ini.j2
+++ b/ansible/roles/neutron/templates/openvswitch_agent.ini.j2
@@ -15,7 +15,8 @@ firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewal
[ovs]
{% if inventory_hostname in groups["network"] or (inventory_hostname in groups["compute"] and computes_need_external_bridge | bool ) %}
-bridge_mappings = {% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}
+{# Format: physnet1:br1,physnet2:br2 #}
+bridge_mappings = {{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}
{% endif %}
datapath_type = {{ ovs_datapath }}
ovsdb_connection = tcp:127.0.0.1:{{ ovsdb_port }}
diff --git a/ansible/roles/nova-cell/tasks/loadbalancer.yml b/ansible/roles/nova-cell/tasks/loadbalancer.yml
index 354fb7003a..2c47ad139f 100644
--- a/ansible/roles/nova-cell/tasks/loadbalancer.yml
+++ b/ansible/roles/nova-cell/tasks/loadbalancer.yml
@@ -48,7 +48,8 @@
enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'novnc' }}"
mode: "http"
external: true
- port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_port'] }}"
+ external_fqdn: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_fqdn'] }}"
+ port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_public_port'] }}"
listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_novncproxy_listen_port'] }}"
backend_http_extra:
- "timeout tunnel 1h"
@@ -84,7 +85,8 @@
enabled: "{{ hostvars[groups[cell_proxy_group][0]]['nova_console'] == 'spice' }}"
mode: "http"
external: true
- port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_port'] }}"
+ external_fqdn: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_fqdn'] }}"
+ port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_public_port'] }}"
listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_spicehtml5proxy_listen_port'] }}"
backend_http_extra:
- "timeout tunnel {{ haproxy_nova_spicehtml5_proxy_tunnel_timeout }}"
@@ -120,7 +122,8 @@
enabled: "{{ hostvars[groups[cell_proxy_group][0]]['enable_nova_serialconsole_proxy'] | bool }}"
mode: "http"
external: true
- port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_port'] }}"
+ external_fqdn: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_fqdn'] }}"
+ port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_public_port'] }}"
listen_port: "{{ hostvars[groups[cell_proxy_group][0]]['nova_serialproxy_listen_port'] }}"
backend_http_extra:
- "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
diff --git a/ansible/roles/nova-cell/templates/nova.conf.j2 b/ansible/roles/nova-cell/templates/nova.conf.j2
index 789d966340..47f3d1e21e 100644
--- a/ansible/roles/nova-cell/templates/nova.conf.j2
+++ b/ansible/roles/nova-cell/templates/nova.conf.j2
@@ -54,7 +54,7 @@ novncproxy_port = {{ nova_novncproxy_listen_port }}
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups[nova_cell_compute_group] %}
-novncproxy_base_url = {{ public_protocol }}://{{ nova_novncproxy_fqdn | put_address_in_context('url') }}:{{ nova_novncproxy_port }}/vnc_lite.html
+novncproxy_base_url = {{ nova_novncproxy_fqdn | kolla_url(public_protocol, nova_novncproxy_public_port, '/vnc_lite.html') }}
{% endif %}
{% endif %}
{% elif nova_console == 'spice' %}
@@ -66,7 +66,7 @@ enabled = true
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups[nova_cell_compute_group] %}
-html5proxy_base_url = {{ public_protocol }}://{{ nova_spicehtml5proxy_fqdn | put_address_in_context('url') }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
+html5proxy_base_url = {{ nova_spicehtml5proxy_fqdn | kolla_url(public_protocol, nova_spicehtml5proxy_public_port, '/spice_auto.html') }}
{% endif %}
html5proxy_host = {{ api_interface_address }}
html5proxy_port = {{ nova_spicehtml5proxy_listen_port }}
@@ -79,7 +79,7 @@ enabled = false
{% if enable_nova_serialconsole_proxy | bool %}
[serial_console]
enabled = true
-base_url = {{ nova_serialproxy_protocol }}://{{ nova_serialproxy_fqdn | put_address_in_context('url') }}:{{ nova_serialproxy_port }}/
+base_url = {{ nova_serialproxy_fqdn | kolla_url(nova_serialproxy_protocol, nova_serialproxy_public_port) }}/
serialproxy_host = {{ api_interface_address }}
serialproxy_port = {{ nova_serialproxy_listen_port }}
proxyclient_address = {{ api_interface_address }}
diff --git a/ansible/roles/nova/defaults/main.yml b/ansible/roles/nova/defaults/main.yml
index cc9cc955d9..531e51dd1d 100644
--- a/ansible/roles/nova/defaults/main.yml
+++ b/ansible/roles/nova/defaults/main.yml
@@ -21,7 +21,8 @@ nova_services:
enabled: "{{ enable_nova }}"
mode: "http"
external: true
- port: "{{ nova_api_port }}"
+ external_fqdn: "{{ nova_external_fqdn }}"
+ port: "{{ nova_api_public_port }}"
listen_port: "{{ nova_api_listen_port }}"
tls_backend: "{{ nova_enable_tls_backend }}"
nova_metadata:
@@ -35,6 +36,7 @@ nova_services:
enabled: "{{ nova_enable_external_metadata }}"
mode: "http"
external: true
+ external_fqdn: "{{ nova_metadata_external_fqdn }}"
port: "{{ nova_metadata_port }}"
listen_port: "{{ nova_metadata_listen_port }}"
tls_backend: "{{ nova_enable_tls_backend }}"
@@ -196,8 +198,8 @@ nova_api_bootstrap_extra_volumes: "{{ nova_extra_volumes }}"
####################
# OpenStack
####################
-nova_internal_base_endpoint: "{{ internal_protocol }}://{{ nova_internal_fqdn | put_address_in_context('url') }}:{{ nova_api_port }}"
-nova_public_base_endpoint: "{{ public_protocol }}://{{ nova_external_fqdn | put_address_in_context('url') }}:{{ nova_api_port }}"
+nova_internal_base_endpoint: "{{ nova_internal_fqdn | kolla_url(internal_protocol, nova_api_port) }}"
+nova_public_base_endpoint: "{{ nova_external_fqdn | kolla_url(public_protocol, nova_api_public_port) }}"
nova_legacy_internal_endpoint: "{{ nova_internal_base_endpoint }}/v2/%(tenant_id)s"
nova_legacy_public_endpoint: "{{ nova_public_base_endpoint }}/v2/%(tenant_id)s"
diff --git a/ansible/roles/octavia-certificates/defaults/main.yml b/ansible/roles/octavia-certificates/defaults/main.yml
index 67fe9085af..2061dbe438 100644
--- a/ansible/roles/octavia-certificates/defaults/main.yml
+++ b/ansible/roles/octavia-certificates/defaults/main.yml
@@ -43,3 +43,6 @@ octavia_certs_client_req_organizational_unit: "{{ octavia_certs_organizational_u
# NOTE(yoctozepto): This should ideally be per controller, i.e. controller
# generates its key&CSR and this CA signs it.
octavia_certs_client_req_common_name: client.example.org
+
+# Used with command `kolla-ansible octavia-certificates --check-expiry`.
+octavia_certs_check_expiry: "no"
diff --git a/ansible/roles/octavia-certificates/tasks/check_expiry.yml b/ansible/roles/octavia-certificates/tasks/check_expiry.yml
new file mode 100644
index 0000000000..66ed8e4b0c
--- /dev/null
+++ b/ansible/roles/octavia-certificates/tasks/check_expiry.yml
@@ -0,0 +1,24 @@
+---
+- name: Gather information on certificates
+ community.crypto.x509_certificate_info:
+ path: "{{ node_custom_config }}/octavia/{{ item }}"
+ valid_at:
+ point_1: "+{{ octavia_certs_expiry_limit | int }}d"
+ register: cert_info
+ delegate_to: localhost
+ with_items:
+ - "server_ca.cert.pem"
+ - "client_ca.cert.pem"
+ - "client.cert-and-key.pem"
+
+- name: Check whether certificates are valid within {{ octavia_certs_expiry_limit }} days
+ assert:
+ that:
+ - item.valid_at.point_1
+ fail_msg: "{{ item.item }} will expire within {{ octavia_certs_expiry_limit }} days, on {{ item.not_after }}"
+ success_msg: "{{ item.item }} will not expire within {{ octavia_certs_expiry_limit }} days. It expires on {{ item.not_after }}"
+ quiet: True
+ loop: "{{ cert_info.results }}"
+ loop_control:
+ label: "{{ item.item }}"
+ delegate_to: localhost
diff --git a/ansible/roles/octavia-certificates/tasks/main.yml b/ansible/roles/octavia-certificates/tasks/main.yml
index ed58ec436c..9ba737b2bd 100644
--- a/ansible/roles/octavia-certificates/tasks/main.yml
+++ b/ansible/roles/octavia-certificates/tasks/main.yml
@@ -7,38 +7,45 @@
# Kolla Ansible prepares and controls the Client CA certificate and key.
# Client CA is used to generate certificates for Octavia controllers.
-- name: Ensure server_ca and client_ca directories exist
- file:
- path: "{{ octavia_certs_work_dir }}/{{ item }}"
- state: "directory"
- mode: 0770
- loop:
- - server_ca
- - client_ca
-
-- name: Copy openssl.cnf
- copy:
- src: "{{ octavia_certs_openssl_cnf_path }}"
- dest: "{{ octavia_certs_work_dir }}/openssl.cnf"
-
-- import_tasks: server_ca.yml
-
-- import_tasks: client_ca.yml
-
-- import_tasks: client_cert.yml
-
-- name: Ensure {{ node_custom_config }}/octavia directory exists
- file:
- path: "{{ node_custom_config }}/octavia"
- state: "directory"
- mode: 0770
-
-- name: Copy the to-be-deployed keys and certs to {{ node_custom_config }}/octavia
- copy:
- src: "{{ octavia_certs_work_dir }}/{{ item.src }}"
- dest: "{{ node_custom_config }}/octavia/{{ item.dest }}"
- with_items:
- - { src: "server_ca/server_ca.cert.pem", dest: "server_ca.cert.pem" }
- - { src: "server_ca/server_ca.key.pem", dest: "server_ca.key.pem" }
- - { src: "client_ca/client_ca.cert.pem", dest: "client_ca.cert.pem" }
- - { src: "client_ca/client.cert-and-key.pem", dest: "client.cert-and-key.pem" }
+- name: Check if any certificates are going to expire
+ include_tasks: check_expiry.yml
+ when: octavia_certs_check_expiry | bool
+
+- block:
+ - name: Ensure server_ca and client_ca directories exist
+ file:
+ path: "{{ octavia_certs_work_dir }}/{{ item }}"
+ state: "directory"
+ mode: 0770
+ loop:
+ - server_ca
+ - client_ca
+
+ - name: Copy openssl.cnf
+ copy:
+ src: "{{ octavia_certs_openssl_cnf_path }}"
+ dest: "{{ octavia_certs_work_dir }}/openssl.cnf"
+
+ - import_tasks: server_ca.yml
+
+ - import_tasks: client_ca.yml
+
+ - import_tasks: client_cert.yml
+
+ - name: Ensure {{ node_custom_config }}/octavia directory exists
+ file:
+ path: "{{ node_custom_config }}/octavia"
+ state: "directory"
+ mode: 0770
+
+ - name: Copy the to-be-deployed keys and certs to {{ node_custom_config }}/octavia
+ copy:
+ src: "{{ octavia_certs_work_dir }}/{{ item.src }}"
+ dest: "{{ node_custom_config }}/octavia/{{ item.dest }}"
+ with_items:
+ - { src: "server_ca/server_ca.cert.pem", dest: "server_ca.cert.pem" }
+ - { src: "server_ca/server_ca.key.pem", dest: "server_ca.key.pem" }
+ - { src: "client_ca/client_ca.cert.pem", dest: "client_ca.cert.pem" }
+ - { src: "client_ca/client.cert-and-key.pem", dest: "client.cert-and-key.pem" }
+
+ when: not octavia_certs_check_expiry | bool
diff --git a/ansible/roles/octavia/defaults/main.yml b/ansible/roles/octavia/defaults/main.yml
index 0ff24fc6cd..a170389d9b 100644
--- a/ansible/roles/octavia/defaults/main.yml
+++ b/ansible/roles/octavia/defaults/main.yml
@@ -20,7 +20,8 @@ octavia_services:
enabled: "{{ enable_octavia }}"
mode: "http"
external: true
- port: "{{ octavia_api_port }}"
+ external_fqdn: "{{ octavia_external_fqdn }}"
+ port: "{{ octavia_api_public_port }}"
listen_port: "{{ octavia_api_listen_port }}"
tls_backend: "{{ octavia_enable_tls_backend }}"
octavia-driver-agent:
diff --git a/ansible/roles/opensearch/defaults/main.yml b/ansible/roles/opensearch/defaults/main.yml
index d65d0e0318..da66a9d312 100644
--- a/ansible/roles/opensearch/defaults/main.yml
+++ b/ansible/roles/opensearch/defaults/main.yml
@@ -40,7 +40,9 @@ opensearch_services:
enabled: "{{ enable_opensearch_dashboards_external | bool }}"
mode: "http"
external: true
+ external_fqdn: "{{ opensearch_dashboards_external_fqdn }}"
port: "{{ opensearch_dashboards_port_external }}"
+ listen_port: "{{ opensearch_dashboards_listen_port }}"
auth_user: "{{ opensearch_dashboards_user }}"
auth_pass: "{{ opensearch_dashboards_password }}"
diff --git a/ansible/roles/ovn-controller/tasks/setup-ovs.yml b/ansible/roles/ovn-controller/tasks/setup-ovs.yml
index 5ac61a16b3..0d9ab0e30c 100644
--- a/ansible/roles/ovn-controller/tasks/setup-ovs.yml
+++ b/ansible/roles/ovn-controller/tasks/setup-ovs.yml
@@ -12,8 +12,10 @@
- name: Configure OVN in OVSDB
vars:
- ovn_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
- ovn_macs: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname + bridge) }}{% if not loop.last %},{% endif %}{% endfor %}"
+ # Format: physnet1:br1,physnet2:br2
+ ovn_mappings: "{{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
+ # Format: physnet1:00:11:22:33:44:55,physnet2:00:11:22:33:44:56
+ ovn_macs: "{% for physnet, bridge in neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) %}{{ physnet }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname + bridge) }}{% if not loop.last %},{% endif %}{% endfor %}"
ovn_cms_opts: "{{ 'enable-chassis-as-gw' if inventory_hostname in groups['ovn-controller-network'] else '' }}{{ ',availability-zones=' + neutron_ovn_availability_zones | join(',') if inventory_hostname in groups['ovn-controller-network'] and neutron_ovn_availability_zones }}"
become: true
kolla_toolbox:
diff --git a/ansible/roles/ovs-dpdk/defaults/main.yml b/ansible/roles/ovs-dpdk/defaults/main.yml
index 2b052f27dd..09a7f7dbdc 100644
--- a/ansible/roles/ovs-dpdk/defaults/main.yml
+++ b/ansible/roles/ovs-dpdk/defaults/main.yml
@@ -37,8 +37,10 @@ ovsdpdk_services:
####################
# OVS
####################
-ovs_bridge_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
-ovs_port_mappings: "{% for bridge in neutron_bridge_name.split(',') %} {{ neutron_external_interface.split(',')[loop.index0] }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
+# Format: physnet1:br1,physnet2:br2
+ovs_bridge_mappings: "{{ neutron_physical_networks.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
+# Format: eth1:br1,eth2:br2
+ovs_port_mappings: "{{ neutron_external_interface.split(',') | zip(neutron_bridge_name.split(',')) | map('join', ':') | join(',') }}"
tunnel_interface_network: "{{ hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['network'] }}/{{ hostvars[inventory_hostname].ansible_facts[dpdk_tunnel_interface]['ipv4']['netmask'] }}"
tunnel_interface_cidr: "{{ dpdk_tunnel_interface_address }}/{{ tunnel_interface_network | ipaddr('prefix') }}"
ovs_cidr_mappings: "{% if neutron_bridge_name.split(',') | length != 1 %} {neutron_bridge_name.split(',')[0]}:{{ tunnel_interface_cidr }} {% else %} {{ neutron_bridge_name }}:{{ tunnel_interface_cidr }} {% endif %}"
diff --git a/ansible/roles/placement/defaults/main.yml b/ansible/roles/placement/defaults/main.yml
index 51c78739bd..1d2734a9f8 100644
--- a/ansible/roles/placement/defaults/main.yml
+++ b/ansible/roles/placement/defaults/main.yml
@@ -20,7 +20,8 @@ placement_services:
enabled: "{{ enable_placement }}"
mode: "http"
external: true
- port: "{{ placement_api_port }}"
+ external_fqdn: "{{ placement_external_fqdn }}"
+ port: "{{ placement_api_public_port }}"
listen_port: "{{ placement_api_listen_port }}"
tls_backend: "{{ placement_enable_tls_backend }}"
@@ -88,8 +89,8 @@ placement_api_extra_volumes: "{{ default_extra_volumes }}"
####################
# OpenStack
####################
-placement_internal_endpoint: "{{ internal_protocol }}://{{ placement_internal_fqdn | put_address_in_context('url') }}:{{ placement_api_port }}"
-placement_public_endpoint: "{{ public_protocol }}://{{ placement_external_fqdn | put_address_in_context('url') }}:{{ placement_api_port }}"
+placement_internal_endpoint: "{{ placement_internal_fqdn | kolla_url(internal_protocol, placement_api_port) }}"
+placement_public_endpoint: "{{ placement_external_fqdn | kolla_url(public_protocol, placement_api_public_port) }}"
placement_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml
index a72d867831..90152bef25 100644
--- a/ansible/roles/prometheus/defaults/main.yml
+++ b/ansible/roles/prometheus/defaults/main.yml
@@ -14,6 +14,14 @@ prometheus_services:
external: false
port: "{{ prometheus_port }}"
active_passive: "{{ prometheus_active_passive | bool }}"
+ prometheus_server_external:
+ enabled: "{{ enable_prometheus_server_external | bool }}"
+ mode: "http"
+ external: true
+ external_fqdn: "{{ prometheus_external_fqdn }}"
+ port: "{{ prometheus_public_port }}"
+ listen_port: "{{ prometheus_listen_port }}"
+ active_passive: "{{ prometheus_active_passive | bool }}"
prometheus-node-exporter:
container_name: prometheus_node_exporter
group: prometheus-node-exporter
@@ -70,7 +78,9 @@ prometheus_services:
enabled: "{{ enable_prometheus_alertmanager_external | bool }}"
mode: "http"
external: true
- port: "{{ prometheus_alertmanager_port }}"
+ external_fqdn: "{{ prometheus_alertmanager_external_fqdn }}"
+ port: "{{ prometheus_alertmanager_public_port }}"
+ listen_port: "{{ prometheus_alertmanager_listen_port }}"
auth_user: "{{ prometheus_alertmanager_user }}"
auth_pass: "{{ prometheus_alertmanager_password }}"
active_passive: "{{ prometheus_alertmanager_active_passive | bool }}"
@@ -134,6 +144,26 @@ prometheus_services:
prometheus_external_labels:
# :
+####################
+# Server
+####################
+enable_prometheus_server_external: false
+
+####################
+# Basic Auth
+####################
+prometheus_basic_auth_users: "{{ prometheus_basic_auth_users_default + prometheus_basic_auth_users_extra }}"
+
+prometheus_basic_auth_users_default:
+ - username: admin
+ password: "{{ prometheus_password }}"
+ enabled: true
+ - username: "{{ prometheus_grafana_user }}"
+ password: "{{ prometheus_grafana_password }}"
+ enabled: "{{ enable_grafana }}"
+
+prometheus_basic_auth_users_extra: []
+
####################
# Database
####################
@@ -145,6 +175,11 @@ prometheus_mysql_exporter_database_user: "{% if use_preconfigured_databases | bo
prometheus_active_passive: true
prometheus_alertmanager_active_passive: true
+####################
+# Node Exporter
+####################
+prometheus_node_exporter_targets_extra: []
+
####################
# Blackbox
####################
@@ -153,7 +188,7 @@ prometheus_alertmanager_active_passive: true
# 'service_name:blackbox_exporter_module:endpoint' for example:
#
# prometheus_blackbox_exporter_targets:
-# - 'glance:os_endpoint:{{ external_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ glance_api_port}}'
+# - 'glance:os_endpoint:{{ external_protocol }}://{{ glance_external_fqdn | put_address_in_context('url') }}:{{ glance_api_port}}'
#
# For a list of modules see the alertmanager config.
prometheus_blackbox_exporter_endpoints: []
@@ -317,6 +352,12 @@ prometheus_openstack_exporter_disabled_object: "{{ '--disable-service.object-sto
prometheus_openstack_exporter_disabled_lb: "{{ '--disable-service.load-balancer --disable-metric=neutron-loadbalancers --disable-metric=neutron-loadbalancers_not_active' if not enable_octavia | bool else '' }}"
prometheus_openstack_exporter_disabled_items: "{{ [prometheus_openstack_exporter_disabled_volume, prometheus_openstack_exporter_disabled_dns, prometheus_openstack_exporter_disabled_object, prometheus_openstack_exporter_disabled_lb | trim] | join(' ') | trim }}"
+prometheus_server_command: >-
+ /opt/prometheus/prometheus --web.config.file=/etc/prometheus/web.yml --config.file /etc/prometheus/prometheus.yml
+ --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_port }}
+ --web.external-url={{ prometheus_public_endpoint if enable_prometheus_server_external else prometheus_internal_endpoint }}
+ --storage.tsdb.path /var/lib/prometheus{% if prometheus_cmdline_extras %} {{ prometheus_cmdline_extras }}{% endif %}
+
prometheus_blackbox_exporter_cmdline_extras: ""
prometheus_cadvisor_cmdline_extras: "--docker_only --store_container_labels=false --disable_metrics=percpu,referenced_memory,cpu_topology,resctrl,udp,advtcp,sched,hugetlb,memory_numa,tcp,process --housekeeping_interval={{ prometheus_scrape_interval }}"
prometheus_elasticsearch_exporter_cmdline_extras: ""
diff --git a/ansible/roles/prometheus/tasks/config.yml b/ansible/roles/prometheus/tasks/config.yml
index f55f6b5baf..9ad3fa9116 100644
--- a/ansible/roles/prometheus/tasks/config.yml
+++ b/ansible/roles/prometheus/tasks/config.yml
@@ -97,6 +97,24 @@
notify:
- Restart prometheus-server container
+- name: Copying over prometheus web config file
+ become: true
+ vars:
+ service: "{{ prometheus_services['prometheus-server'] }}"
+ template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/prometheus-server/web.yml"
+ mode: "0600"
+ when:
+ - inventory_hostname in groups[service.group]
+ - service.enabled | bool
+ with_first_found:
+ - "{{ node_custom_config }}/prometheus/{{ inventory_hostname }}/web.yml"
+ - "{{ node_custom_config }}/prometheus/web.yml"
+ - "{{ role_path }}/templates/prometheus-web.yml.j2"
+ notify:
+ - Restart prometheus-server container
+
- name: Copying over prometheus alertmanager config file
become: true
vars:
diff --git a/ansible/roles/prometheus/tasks/precheck.yml b/ansible/roles/prometheus/tasks/precheck.yml
index af0592a3a5..744bc4ad7c 100644
--- a/ansible/roles/prometheus/tasks/precheck.yml
+++ b/ansible/roles/prometheus/tasks/precheck.yml
@@ -25,6 +25,28 @@
check_mode: false
register: container_facts
+- name: Check that prometheus_bcrypt_salt is correctly set
+ assert:
+ that:
+ - prometheus_bcrypt_salt is defined
+ - prometheus_bcrypt_salt is string
+ - prometheus_bcrypt_salt | length == 22
+
+- name: Check that prometheus_password is correctly set
+ assert:
+ that:
+ - prometheus_password is defined
+ - prometheus_password is string
+ - prometheus_password | length > 0
+
+- name: Check that prometheus_grafana_password is correctly set
+ assert:
+ that:
+ - prometheus_grafana_password is defined
+ - prometheus_grafana_password is string
+ - prometheus_grafana_password | length > 0
+ when: enable_grafana | bool
+
- name: Checking free port for Prometheus server
wait_for:
host: "{{ 'api' | kolla_address }}"
diff --git a/ansible/roles/prometheus/templates/prometheus-server.json.j2 b/ansible/roles/prometheus/templates/prometheus-server.json.j2
index d57469ff2c..99ee0c7865 100644
--- a/ansible/roles/prometheus/templates/prometheus-server.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-server.json.j2
@@ -1,5 +1,5 @@
{
- "command": "/opt/prometheus/prometheus --config.file /etc/prometheus/prometheus.yml --web.listen-address {{ api_interface_address | put_address_in_context('url') }}:{{ prometheus_port }} --web.external-url={{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ prometheus_port }} --storage.tsdb.path /var/lib/prometheus{% if prometheus_cmdline_extras %} {{ prometheus_cmdline_extras }}{% endif %}",
+ "command": "{{ prometheus_server_command }}",
"config_files": [
{
"source": "{{ container_config_directory }}/prometheus.yml",
@@ -7,6 +7,12 @@
"owner": "prometheus",
"perm": "0600"
},
+ {
+ "source": "{{ container_config_directory }}/web.yml",
+ "dest": "/etc/prometheus/web.yml",
+ "owner": "prometheus",
+ "perm": "0600"
+ },
{
"source": "{{ container_config_directory }}/extras/*",
"dest": "/etc/prometheus/extras/",
diff --git a/ansible/roles/prometheus/templates/prometheus-web.yml.j2 b/ansible/roles/prometheus/templates/prometheus-web.yml.j2
new file mode 100644
index 0000000000..67e0554285
--- /dev/null
+++ b/ansible/roles/prometheus/templates/prometheus-web.yml.j2
@@ -0,0 +1,4 @@
+basic_auth_users:
+{% for user in prometheus_basic_auth_users | selectattr('enabled') | list %}
+ {{ user.username }}: {{ user.password | password_hash('bcrypt', salt=prometheus_bcrypt_salt) }}
+{% endfor %}
diff --git a/ansible/roles/prometheus/templates/prometheus.yml.j2 b/ansible/roles/prometheus/templates/prometheus.yml.j2
index f0ff6c8461..c56c507c0c 100644
--- a/ansible/roles/prometheus/templates/prometheus.yml.j2
+++ b/ansible/roles/prometheus/templates/prometheus.yml.j2
@@ -18,78 +18,121 @@ rule_files:
scrape_configs:
- job_name: prometheus
+ basic_auth:
+ username: admin
+ password: "{{ prometheus_password }}"
static_configs:
- - targets:
{% for host in groups['prometheus'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ prometheus_port }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% if enable_prometheus_node_exporter | bool %}
- job_name: node
static_configs:
- - targets:
{% for host in groups['prometheus-node-exporter'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_node_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
+{% endfor %}
+{% for target in prometheus_node_exporter_targets_extra %}
+ - targets:
+ - '{{ target.target }}'
+{% if target.labels | default({}, true) %}
+ labels: {{ target.labels | to_json }}
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_mysqld_exporter | bool %}
- job_name: mysqld
static_configs:
- - targets:
{% for host in groups['prometheus-mysqld-exporter'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_mysqld_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_haproxy_exporter | bool %}
- job_name: haproxy
static_configs:
- - targets:
{% for host in groups['prometheus-haproxy-exporter'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_haproxy_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_rabbitmq_exporter | bool %}
- job_name: rabbitmq
static_configs:
- - targets:
{% for host in groups['rabbitmq'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_rabbitmq_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_memcached_exporter | bool %}
- job_name: memcached
static_configs:
- - targets:
{% for host in groups['prometheus-memcached-exporter'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_memcached_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_cadvisor | bool %}
- job_name: cadvisor
static_configs:
- - targets:
{% for host in groups["prometheus-cadvisor"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_cadvisor_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_fluentd_integration | bool %}
- job_name: fluentd
static_configs:
- - targets:
{% for host in groups['fluentd'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_fluentd_integration_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_ceph_mgr_exporter | bool %}
- job_name: ceph_mgr_exporter
honor_labels: true
+ scrape_interval: {{ prometheus_ceph_exporter_interval }}
static_configs:
- targets:
{% for exporter in prometheus_ceph_mgr_exporter_endpoints %}
@@ -114,9 +157,13 @@ scrape_configs:
- job_name: elasticsearch_exporter
scrape_interval: {{ prometheus_elasticsearch_exporter_interval }}
static_configs:
- - targets:
{% for host in groups["prometheus-elasticsearch-exporter"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_elasticsearch_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
@@ -155,9 +202,13 @@ scrape_configs:
scrape_interval: {{ prometheus_libvirt_exporter_interval }}
honor_labels: true
static_configs:
- - targets:
{% for host in groups["prometheus-libvirt-exporter"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_libvirt_exporter_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
@@ -167,18 +218,26 @@ scrape_configs:
scheme: https
{% endif %}
static_configs:
- - targets:
{% for host in groups["etcd"] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_etcd_integration_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
{% endif %}
{% if enable_prometheus_alertmanager | bool %}
- job_name: alertmanager
static_configs:
- - targets:
{% for host in groups['prometheus-alertmanager'] %}
+ - targets:
- '{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['prometheus_alertmanager_port'] }}'
+{% if hostvars[host].prometheus_instance_label | default(false, true) %}
+ labels:
+ instance: "{{ hostvars[host].prometheus_instance_label }}"
+{% endif %}
{% endfor %}
alerting:
diff --git a/ansible/roles/rabbitmq/defaults/main.yml b/ansible/roles/rabbitmq/defaults/main.yml
index ff7013adb7..5031cb4d7e 100644
--- a/ansible/roles/rabbitmq/defaults/main.yml
+++ b/ansible/roles/rabbitmq/defaults/main.yml
@@ -32,6 +32,7 @@ rabbitmq_services:
enabled: "{{ enable_outward_rabbitmq }}"
mode: "tcp"
external: true
+ external_fqdn: "{{ outward_rabbitmq_external_fqdn }}"
port: "{{ outward_rabbitmq_port }}"
host_group: "outward-rabbitmq"
frontend_tcp_extra:
diff --git a/ansible/roles/sahara/defaults/main.yml b/ansible/roles/sahara/defaults/main.yml
index c64a7df420..4256261af8 100644
--- a/ansible/roles/sahara/defaults/main.yml
+++ b/ansible/roles/sahara/defaults/main.yml
@@ -14,11 +14,14 @@ sahara_services:
mode: "http"
external: false
port: "{{ sahara_api_port }}"
+ listen_port: "{{ sahara_api_listen_port }}"
sahara_api_external:
enabled: "{{ enable_sahara }}"
mode: "http"
external: true
- port: "{{ sahara_api_port }}"
+ external_fqdn: "{{ sahara_external_fqdn }}"
+ port: "{{ sahara_api_public_port }}"
+ listen_port: "{{ sahara_api_listen_port }}"
sahara-engine:
container_name: sahara_engine
group: sahara-engine
@@ -122,8 +125,8 @@ sahara_engine_extra_volumes: "{{ sahara_extra_volumes }}"
####################
# OpenStack
####################
-sahara_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ sahara_api_port }}"
-sahara_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ sahara_api_port }}"
+sahara_internal_endpoint: "{{ sahara_internal_fqdn | kolla_url(internal_protocol, sahara_api_port) }}"
+sahara_public_endpoint: "{{ sahara_external_fqdn | kolla_url(public_protocol, sahara_api_public_port) }}"
sahara_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/senlin/defaults/main.yml b/ansible/roles/senlin/defaults/main.yml
index b1bfa11082..f06b241941 100644
--- a/ansible/roles/senlin/defaults/main.yml
+++ b/ansible/roles/senlin/defaults/main.yml
@@ -19,7 +19,8 @@ senlin_services:
enabled: "{{ enable_senlin }}"
mode: "http"
external: true
- port: "{{ senlin_api_port }}"
+ external_fqdn: "{{ senlin_external_fqdn }}"
+ port: "{{ senlin_api_public_port }}"
listen_port: "{{ senlin_api_listen_port }}"
senlin-conductor:
container_name: senlin_conductor
@@ -186,8 +187,8 @@ senlin_health_manager_extra_volumes: "{{ senlin_extra_volumes }}"
####################
# OpenStack
####################
-senlin_internal_endpoint: "{{ internal_protocol }}://{{ senlin_internal_fqdn | put_address_in_context('url') }}:{{ senlin_api_port }}"
-senlin_public_endpoint: "{{ public_protocol }}://{{ senlin_external_fqdn | put_address_in_context('url') }}:{{ senlin_api_port }}"
+senlin_internal_endpoint: "{{ senlin_internal_fqdn | kolla_url(internal_protocol, senlin_api_port) }}"
+senlin_public_endpoint: "{{ senlin_external_fqdn | kolla_url(public_protocol, senlin_api_public_port) }}"
senlin_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/skyline/defaults/main.yml b/ansible/roles/skyline/defaults/main.yml
index ca78515714..12a9ec84db 100644
--- a/ansible/roles/skyline/defaults/main.yml
+++ b/ansible/roles/skyline/defaults/main.yml
@@ -20,6 +20,7 @@ skyline_services:
enabled: "{{ enable_skyline }}"
mode: "http"
external: true
+ external_fqdn: "{{ skyline_apiserver_external_fqdn }}"
port: "{{ skyline_apiserver_port }}"
listen_port: "{{ skyline_apiserver_listen_port }}"
tls_backend: "{{ skyline_enable_tls_backend }}"
@@ -43,6 +44,7 @@ skyline_services:
enabled: "{{ enable_skyline }}"
mode: "http"
external: true
+ external_fqdn: "{{ skyline_console_external_fqdn }}"
port: "{{ skyline_console_port }}"
listen_port: "{{ skyline_console_listen_port }}"
tls_backend: "{{ skyline_enable_tls_backend }}"
@@ -128,8 +130,8 @@ skyline_console_extra_volumes: "{{ skyline_extra_volumes }}"
####################
# OpenStack
####################
-skyline_internal_endpoint: "{{ internal_protocol }}://{{ skyline_internal_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}"
-skyline_public_endpoint: "{{ public_protocol }}://{{ skyline_external_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}"
+skyline_apiserver_internal_base_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}"
+skyline_apiserver_public_base_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}"
skyline_logging_debug: "{{ openstack_logging_debug }}"
@@ -171,8 +173,8 @@ skyline_ks_services:
type: "panel"
description: "OpenStack Dashboard Service"
endpoints:
- - {'interface': 'internal', 'url': '{{ skyline_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ skyline_public_endpoint }}'}
+ - {'interface': 'internal', 'url': '{{ skyline_apiserver_internal_base_endpoint }}'}
+ - {'interface': 'public', 'url': '{{ skyline_apiserver_public_base_endpoint }}'}
skyline_ks_users:
- project: "service"
diff --git a/ansible/roles/skyline/templates/nginx.conf.j2 b/ansible/roles/skyline/templates/nginx.conf.j2
index 8295d728d5..cc6bb50d6d 100644
--- a/ansible/roles/skyline/templates/nginx.conf.j2
+++ b/ansible/roles/skyline/templates/nginx.conf.j2
@@ -87,8 +87,8 @@ http {
# Service: skyline
location {{ skyline_nginx_prefix }}/skyline/ {
- proxy_pass {{ internal_protocol }}://{{ skyline_internal_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}/;
- proxy_redirect {{ internal_protocol }}://{{ skyline_internal_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}/ {{ skyline_nginx_prefix }}/skyline/;
+ proxy_pass {{ internal_protocol }}://{{ skyline_apiserver_internal_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}/;
+ proxy_redirect {{ internal_protocol }}://{{ skyline_apiserver_internal_fqdn | put_address_in_context('url') }}:{{ skyline_apiserver_port }}/ {{ skyline_nginx_prefix }}/skyline/;
proxy_buffering off;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
diff --git a/ansible/roles/solum/defaults/main.yml b/ansible/roles/solum/defaults/main.yml
index 4af0633a66..b8c705f9f3 100644
--- a/ansible/roles/solum/defaults/main.yml
+++ b/ansible/roles/solum/defaults/main.yml
@@ -30,24 +30,30 @@ solum_services:
mode: "http"
external: false
port: "{{ solum_application_deployment_port }}"
+ listen_port: "{{ solum_application_deployment_listen_port }}"
host_group: "solum-application-deployment"
solum_application_deployment_external:
enabled: "{{ enable_solum }}"
mode: "http"
external: true
- port: "{{ solum_application_deployment_port }}"
+ external_fqdn: "{{ solum_application_deployment_external_fqdn }}"
+ port: "{{ solum_application_deployment_public_port }}"
+ listen_port: "{{ solum_application_deployment_listen_port }}"
host_group: "solum-application-deployment"
solum_image_builder:
enabled: "{{ enable_solum }}"
mode: "http"
external: false
port: "{{ solum_image_builder_port }}"
+ listen_port: "{{ solum_image_builder_listen_port }}"
host_group: "solum-image-builder"
solum_image_builder_external:
enabled: "{{ enable_solum }}"
mode: "http"
external: true
- port: "{{ solum_image_builder_port }}"
+ external_fqdn: "{{ solum_image_builder_external_fqdn }}"
+ port: "{{ solum_image_builder_public_port }}"
+ listen_port: "{{ solum_image_builder_listen_port }}"
host_group: "solum-image-builder"
solum-conductor:
container_name: solum_conductor
@@ -198,11 +204,11 @@ solum_conductor_extra_volumes: "{{ solum_extra_volumes }}"
####################
# OpenStack
####################
-solum_image_builder_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ solum_image_builder_port }}"
-solum_image_builder_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ solum_image_builder_port }}"
+solum_image_builder_internal_endpoint: "{{ solum_image_builder_internal_fqdn | kolla_url(internal_protocol, solum_image_builder_port) }}"
+solum_image_builder_public_endpoint: "{{ solum_image_builder_external_fqdn | kolla_url(public_protocol, solum_image_builder_public_port) }}"
-solum_application_deployment_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ solum_application_deployment_port }}"
-solum_application_deployment_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ solum_application_deployment_port }}"
+solum_application_deployment_internal_endpoint: "{{ solum_application_deployment_internal_fqdn | kolla_url(internal_protocol, solum_application_deployment_port) }}"
+solum_application_deployment_public_endpoint: "{{ solum_application_deployment_external_fqdn | kolla_url(public_protocol, solum_application_deployment_public_port) }}"
solum_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/swift/defaults/main.yml b/ansible/roles/swift/defaults/main.yml
index 4469d7cd68..2ac01e6ed3 100644
--- a/ansible/roles/swift/defaults/main.yml
+++ b/ansible/roles/swift/defaults/main.yml
@@ -13,6 +13,7 @@ swift_services:
enabled: "{{ enable_swift }}"
mode: "http"
external: true
+ external_fqdn: "{{ swift_external_fqdn }}"
port: "{{ swift_proxy_server_listen_port }}"
####################
diff --git a/ansible/roles/tacker/defaults/main.yml b/ansible/roles/tacker/defaults/main.yml
index daa9980b0a..b37fb94117 100644
--- a/ansible/roles/tacker/defaults/main.yml
+++ b/ansible/roles/tacker/defaults/main.yml
@@ -15,12 +15,15 @@ tacker_services:
mode: "http"
external: false
port: "{{ tacker_server_port }}"
+ listen_port: "{{ tacker_server_listen_port }}"
custom_member_list: "{{ tacker_haproxy_members.split(';') }}"
tacker_server_external:
enabled: "{{ enable_tacker }}"
mode: "http"
external: true
- port: "{{ tacker_server_port }}"
+ external_fqdn: "{{ tacker_external_fqdn }}"
+ port: "{{ tacker_server_public_port }}"
+ listen_port: "{{ tacker_server_listen_port }}"
custom_member_list: "{{ tacker_haproxy_members.split(';') }}"
tacker-conductor:
container_name: "tacker_conductor"
@@ -134,8 +137,8 @@ tacker_hosts: "{{ [groups['tacker'] | first] }}"
####################
# OpenStack
####################
-tacker_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ tacker_server_port }}"
-tacker_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ tacker_server_port }}"
+tacker_internal_endpoint: "{{ tacker_internal_fqdn | kolla_url(internal_protocol, tacker_server_port) }}"
+tacker_public_endpoint: "{{ tacker_external_fqdn | kolla_url(public_protocol, tacker_server_public_port) }}"
tacker_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/trove/defaults/main.yml b/ansible/roles/trove/defaults/main.yml
index a857486326..a15a7351d8 100644
--- a/ansible/roles/trove/defaults/main.yml
+++ b/ansible/roles/trove/defaults/main.yml
@@ -20,9 +20,10 @@ trove_services:
enabled: "{{ enable_trove }}"
mode: "http"
external: true
- port: "{{ trove_api_port }}"
listen_port: "{{ trove_api_listen_port }}"
tls_backend: "{{ trove_enable_tls_backend }}"
+ external_fqdn: "{{ trove_external_fqdn }}"
+ port: "{{ trove_api_public_port }}"
trove-conductor:
container_name: trove_conductor
group: trove-conductor
@@ -158,8 +159,8 @@ trove_taskmanager_extra_volumes: "{{ trove_extra_volumes }}"
####################
# OpenStack
####################
-trove_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ trove_api_port }}/v1.0/%(tenant_id)s"
-trove_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ trove_api_port }}/v1.0/%(tenant_id)s"
+trove_internal_endpoint: "{{ trove_internal_fqdn | kolla_url(internal_protocol, trove_api_port, '/v1.0/%(tenant_id)s') }}"
+trove_public_endpoint: "{{ trove_external_fqdn | kolla_url(public_protocol, trove_api_public_port, '/v1.0/%(tenant_id)s') }}"
trove_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/venus/defaults/main.yml b/ansible/roles/venus/defaults/main.yml
index d2d044f592..7df363d2f6 100644
--- a/ansible/roles/venus/defaults/main.yml
+++ b/ansible/roles/venus/defaults/main.yml
@@ -18,7 +18,8 @@ venus_services:
enabled: "{{ enable_venus }}"
mode: "http"
external: true
- port: "{{ venus_api_port }}"
+ external_fqdn: "{{ venus_external_fqdn }}"
+ port: "{{ venus_api_public_port }}"
venus-manager:
container_name: venus_manager
group: venus-manager
@@ -93,8 +94,8 @@ venus_manager_extra_volumes: "{{ venus_extra_volumes }}"
####################
# OpenStack
####################
-venus_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ venus_api_port }}/v1.0/%(tenant_id)s"
-venus_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ venus_api_port }}/v1.0/%(tenant_id)s"
+venus_internal_endpoint: "{{ venus_internal_fqdn | kolla_url(internal_protocol, venus_api_port) }}"
+venus_public_endpoint: "{{ venus_external_fqdn | kolla_url(public_protocol, venus_api_public_port) }}"
venus_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/vitrage/defaults/main.yml b/ansible/roles/vitrage/defaults/main.yml
index 8e192d784d..781e51d188 100644
--- a/ansible/roles/vitrage/defaults/main.yml
+++ b/ansible/roles/vitrage/defaults/main.yml
@@ -18,7 +18,8 @@ vitrage_services:
enabled: "{{ enable_vitrage }}"
mode: "http"
external: true
- port: "{{ vitrage_api_port }}"
+ external_fqdn: "{{ vitrage_external_fqdn }}"
+ port: "{{ vitrage_api_public_port }}"
vitrage-notifier:
container_name: vitrage_notifier
group: vitrage-notifier
diff --git a/ansible/roles/watcher/defaults/main.yml b/ansible/roles/watcher/defaults/main.yml
index 03f3445536..9b42051bcf 100644
--- a/ansible/roles/watcher/defaults/main.yml
+++ b/ansible/roles/watcher/defaults/main.yml
@@ -14,11 +14,14 @@ watcher_services:
mode: "http"
external: false
port: "{{ watcher_api_port }}"
+ listen_port: "{{ watcher_api_listen_port }}"
watcher_api_external:
enabled: "{{ enable_watcher }}"
mode: "http"
external: true
- port: "{{ watcher_api_port }}"
+ external_fqdn: "{{ watcher_external_fqdn }}"
+ port: "{{ watcher_api_public_port }}"
+ listen_port: "{{ watcher_api_listen_port }}"
watcher-applier:
container_name: watcher_applier
group: watcher-applier
@@ -151,8 +154,8 @@ watcher_engine_extra_volumes: "{{ watcher_extra_volumes }}"
####################
# OpenStack
####################
-watcher_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ watcher_api_port }}"
-watcher_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ watcher_api_port }}"
+watcher_internal_endpoint: "{{ watcher_internal_fqdn | kolla_url(internal_protocol, watcher_api_port) }}"
+watcher_public_endpoint: "{{ watcher_external_fqdn | kolla_url(public_protocol, watcher_api_public_port) }}"
watcher_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/zun/defaults/main.yml b/ansible/roles/zun/defaults/main.yml
index 61719a179e..aa687e5a1b 100644
--- a/ansible/roles/zun/defaults/main.yml
+++ b/ansible/roles/zun/defaults/main.yml
@@ -14,11 +14,14 @@ zun_services:
mode: "http"
external: false
port: "{{ zun_api_port }}"
+ listen_port: "{{ zun_api_listen_port }}"
zun_api_external:
enabled: "{{ enable_zun }}"
mode: "http"
external: true
- port: "{{ zun_api_port }}"
+ external_fqdn: "{{ zun_external_fqdn }}"
+ port: "{{ zun_api_public_port }}"
+ listen_port: "{{ zun_api_listen_port }}"
zun-wsproxy:
container_name: zun_wsproxy
group: zun-wsproxy
@@ -206,8 +209,8 @@ zun_cni_daemon_extra_volumes: "{{ zun_extra_volumes }}"
####################
## OpenStack
####################
-zun_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/v1/"
-zun_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/v1/"
+zun_internal_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port, '/v1/') }}"
+zun_public_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port, '/v1/') }}"
zun_logging_debug: "{{ openstack_logging_debug }}"
diff --git a/ansible/roles/zun/templates/zun.conf.j2 b/ansible/roles/zun/templates/zun.conf.j2
index 6971b81d81..dd5634fd2f 100644
--- a/ansible/roles/zun/templates/zun.conf.j2
+++ b/ansible/roles/zun/templates/zun.conf.j2
@@ -114,7 +114,7 @@ host_shared_with_nova = {{ inventory_hostname in groups['compute'] and enable_no
[websocket_proxy]
wsproxy_host = {{ api_interface_address }}
wsproxy_port = {{ zun_wsproxy_port }}
-base_url = {{ zun_wsproxy_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ zun_wsproxy_port }}
+base_url = {{ zun_wsproxy_protocol }}://{{ zun_external_fqdn | put_address_in_context('url') }}:{{ zun_wsproxy_port }}
[docker]
api_url = tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
diff --git a/ansible/site.yml b/ansible/site.yml
index 4ba77cdc71..c933ff5876 100644
--- a/ansible/site.yml
+++ b/ansible/site.yml
@@ -41,6 +41,7 @@
- enable_iscsid_{{ enable_iscsid | bool }}
- enable_keystone_{{ enable_keystone | bool }}
- enable_kuryr_{{ enable_kuryr | bool }}
+ - enable_letsencrypt_{{ enable_letsencrypt | bool }}
- enable_loadbalancer_{{ enable_loadbalancer | bool }}
- enable_magnum_{{ enable_magnum | bool }}
- enable_manila_{{ enable_manila | bool }}
@@ -200,6 +201,11 @@
tasks_from: loadbalancer
tags: keystone
when: enable_keystone | bool
+ - include_role:
+ name: letsencrypt
+ tasks_from: loadbalancer
+ tags: letsencrypt
+ when: enable_letsencrypt | bool
- include_role:
name: magnum
tasks_from: loadbalancer
@@ -340,6 +346,16 @@
- enable_haproxy | bool
- kolla_action in ['deploy', 'reconfigure', 'upgrade', 'config']
+- name: Apply role letsencrypt
+ gather_facts: false
+ hosts:
+ - letsencrypt
+ - '&enable_letsencrypt_True'
+ serial: '{{ kolla_serial|default("0") }}'
+ roles:
+ - { role: letsencrypt,
+ tags: letsencrypt }
+
- name: Apply role collectd
gather_facts: false
hosts:
@@ -966,3 +982,13 @@
roles:
- { role: skyline,
tags: skyline }
+
+- name: Apply role caso
+ gather_facts: false
+ hosts:
+ - caso
+ serial: '{{ kolla_serial|default("0") }}'
+ roles:
+ - { role: caso,
+ tags: caso,
+ when: enable_caso | bool }
diff --git a/doc/source/admin/tls.rst b/doc/source/admin/tls.rst
index f2acb9bb3d..2810ecdc83 100644
--- a/doc/source/admin/tls.rst
+++ b/doc/source/admin/tls.rst
@@ -288,6 +288,35 @@ disable verification of the backend certificate:
.. _admin-tls-generating-a-private-ca:
+Generating TLS certificates with Let's Encrypt
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Let's Encrypt is a free, automated, and open certificate authority.
+
+To enable OpenStack to deploy the Let's Encrypt container to fetch
+certificates from the Let's Encrypt certificate authority, the following
+must be configured in ``globals.yml``:
+
+.. code-block:: yaml
+
+ enable_letsencrypt: "yes"
+ letsencrypt_email: ""
+
+The Let's Encrypt container will attempt to renew your certificates every 12
+hours. If the certificates are renewed, they will automatically be deployed
+to the HAProxy containers using SSH.
+
+.. note::
+
+ If ``letsencrypt_email`` is not a valid email address, the letsencrypt
+ role will not work correctly.
+
+.. note::
+
+ If ``enable_letsencrypt`` is set to true, haproxy's socket will run with
+ admin access level. This is needed so Let's Encrypt can interact
+ with HAProxy.
+
Generating a Private Certificate Authority
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/reference/high-availability/haproxy-guide.rst b/doc/source/reference/high-availability/haproxy-guide.rst
index d3337ecab0..418ad534fb 100644
--- a/doc/source/reference/high-availability/haproxy-guide.rst
+++ b/doc/source/reference/high-availability/haproxy-guide.rst
@@ -22,6 +22,26 @@ setting the following in ``/etc/kolla/globals.yml``:
enable_haproxy: "no"
enable_keepalived: "no"
+Single external frontend for services
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Single external frontend for particular service can be enabled by adding the
+following in ``/etc/kolla/globals.yml`` (feature and example services):
+
+.. code-block:: yaml
+
+ haproxy_single_external_frontend: true
+
+ nova_external_fqdn: "nova.example.com"
+ neutron_external_fqdn: "neutron.example.com"
+ horizon_external_fqdn: "horizon.example.com"
+ opensearch_external_fqdn: "opensearch.example.com"
+ grafana_external_fqdn: "grafana.example.com"
+
+
+The above-mentioned functionality allows exposing services on separate
+FQDNs on a commonly used port, i.e. 443, instead of the usual high ports.
+
Configuration
~~~~~~~~~~~~~
@@ -62,3 +82,13 @@ To set weight of backend per service, modify inventory file as below:
server1 haproxy_nova_api_weight=10
server2 haproxy_nova_api_weight=2 haproxy_keystone_internal_weight=10
server3 haproxy_keystone_admin_weight=50
+
+HTTP/2 Support
+---------------
+
+HAProxy with HTTP/2 frontend support is enabled by default. It may be
+disabled by setting the following in ``/etc/kolla/globals.yml``:
+
+.. code-block:: yaml
+
+ haproxy_enable_http2: "no"
diff --git a/doc/source/reference/logging-and-monitoring/prometheus-guide.rst b/doc/source/reference/logging-and-monitoring/prometheus-guide.rst
index 76d3ec008b..f60229b059 100644
--- a/doc/source/reference/logging-and-monitoring/prometheus-guide.rst
+++ b/doc/source/reference/logging-and-monitoring/prometheus-guide.rst
@@ -34,6 +34,26 @@ In order to remove leftover volume containing Prometheus 1.x data, execute:
on all hosts wherever Prometheus was previously deployed.
+Basic Auth
+~~~~~~~~~~
+
+Prometheus is protected with basic HTTP authentication. Kolla-ansible will
+create the following users: ``admin`` and ``grafana`` (if grafana is
+enabled). The grafana username can be overridden using the variable
+``prometheus_grafana_user``. The passwords are defined by the
+``prometheus_password`` and ``prometheus_grafana_password`` variables in
+``passwords.yml``. The list of basic auth users can be extended using the
+``prometheus_basic_auth_users_extra`` variable:
+
+.. code-block:: yaml
+
+ prometheus_basic_auth_users_extra:
+ - username: user
+ password: hello
+ enabled: true
+
+or completely overridden with the ``prometheus_basic_auth_users`` variable.
+
Extending the default command line options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -156,3 +176,66 @@ files:
- 192.168.1.1
labels:
job: ipmi_exporter
+
+Metric Instance labels
+~~~~~~~~~~~~~~~~~~~~~~
+
+Previously, Prometheus metrics used to label instances based on their IP
+addresses. This behaviour can now be changed such that instances can be
+labelled based on their inventory hostname instead. The IP address remains as
+the target address, therefore, even if the hostname is unresolvable, it doesn't
+pose an issue.
+
+The default behavior still labels instances with their IP addresses. However,
+this can be adjusted by changing the ``prometheus_instance_label`` variable.
+This variable accepts the following values:
+
+* ``None``: Instance labels will be IP addresses (default)
+* ``{{ ansible_facts.hostname }}``: Instance labels will be hostnames
+* ``{{ ansible_facts.nodename }}``: Instance labels will be FQDNs
+
+To implement this feature, modify the configuration file
+``/etc/kolla/globals.yml`` and update the ``prometheus_instance_label``
+variable accordingly. Remember, changing this variable will cause Prometheus to
+scrape metrics with new names for a short period. This will result in duplicate
+metrics until all metrics are replaced with their new labels.
+
+.. code-block:: yaml
+
+ prometheus_instance_label: "{{ ansible_facts.hostname }}"
+
+This metric labeling feature may become the default setting in future releases.
+Therefore, if you wish to retain the current default (IP address labels), make
+sure to set the ``prometheus_instance_label`` variable to ``None``.
+
+.. note::
+
+ This feature may generate duplicate metrics temporarily while Prometheus
+ updates the metric labels. Please be aware of this while analyzing metrics
+ during the transition period.
+
+Exporter configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+Node Exporter
+-------------
+
+Sometimes it can be useful to monitor hosts outside of the Kolla deployment.
+One method of doing this is to configure a list of additional targets using the
+``prometheus_node_exporter_targets_extra`` variable. The format of which
+should be a list of dictionaries with the following keys:
+
+* target: URL of node exporter to scrape
+* labels: (Optional) A list of labels to set on the metrics scraped from this
+ exporter.
+
+For example:
+
+.. code-block:: yaml
+ :caption: ``/etc/kolla/globals.yml``
+
+ prometheus_node_exporter_targets_extra:
+ - target: http://10.0.0.1:1234
+ labels:
+ instance: host1
+
diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst
index a024744fed..2b7906913c 100644
--- a/doc/source/reference/networking/neutron.rst
+++ b/doc/source/reference/networking/neutron.rst
@@ -20,13 +20,20 @@ Neutron external interface is used for communication with the external world,
for example provider networks, routers and floating IPs.
For setting up the neutron external interface modify
``/etc/kolla/globals.yml`` setting ``neutron_external_interface`` to the
-desired interface name. This interface is used by hosts in the ``network``
-group. It is also used by hosts in the ``compute`` group if
+desired interface name or comma-separated list of interface names. Its default
+value is ``eth1``. These external interfaces are used by hosts in the
+``network`` group. They are also used by hosts in the ``compute`` group if
``enable_neutron_provider_networks`` is set or DVR is enabled.
-The interface is plugged into a bridge (Open vSwitch or Linux Bridge, depending
-on the driver) defined by ``neutron_bridge_name``, which defaults to ``br-ex``.
-The default Neutron physical network is ``physnet1``.
+The external interfaces are each plugged into a bridge (Open vSwitch or Linux
+Bridge, depending on the driver) defined by ``neutron_bridge_name``, which
+defaults to ``br-ex``. When there are multiple external interfaces,
+``neutron_bridge_name`` should be a comma-separated list of the same length.
+
+The default Neutron physical network is ``physnet1``, or ``physnet1`` to
+``physnetN`` when there are multiple external network interfaces. This may be
+changed by setting ``neutron_physical_networks`` to a comma-separated list of
+networks of the same length.
Example: single interface
-------------------------
@@ -54,6 +61,30 @@ These two lists are "zipped" together, such that ``eth1`` is plugged into the
Ansible maps these interfaces to Neutron physical networks ``physnet1`` and
``physnet2`` respectively.
+Example: custom physical networks
+---------------------------------
+
+Sometimes we may want to customise the physical network names used. This may be
+to allow for not all hosts having access to all physical networks, or to use
+more descriptive names.
+
+For example, in an environment with a separate physical network for Ironic
+provisioning, controllers might have access to two physical networks:
+
+.. code-block:: yaml
+
+ neutron_external_interface: "eth1,eth2"
+ neutron_bridge_name: "br-ex1,br-ex2"
+ neutron_physical_networks: "physnet1,physnet2"
+
+While compute nodes have access only to ``physnet2``.
+
+.. code-block:: yaml
+
+ neutron_external_interface: "eth1"
+ neutron_bridge_name: "br-ex1"
+ neutron_physical_networks: "physnet2"
+
Example: shared interface
-------------------------
diff --git a/doc/source/reference/networking/octavia.rst b/doc/source/reference/networking/octavia.rst
index b498673998..53f266065f 100644
--- a/doc/source/reference/networking/octavia.rst
+++ b/doc/source/reference/networking/octavia.rst
@@ -75,6 +75,16 @@ used to encrypt the CA key:
.. _octavia-network:
+Monitoring certificate expiry
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can use the following command to check if any of the certificates will
+expire within a given number of days:
+
+.. code-block:: console
+
+ kolla-ansible octavia-certificates --check-expiry
+
Networking
----------
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index aa192aee81..80b3488558 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -269,6 +269,19 @@ workaround_ansible_issue_8743: yes
# Please read the docs for more details.
#acme_client_servers: []
+####################
+# LetsEncrypt options
+####################
+# This option is required for letsencrypt role to work properly.
+#letsencrypt_email: ""
+
+####################
+# LetsEncrypt certificate server options
+####################
+#letsencrypt_cert_server: "https://acme-v02.api.letsencrypt.org/directory"
+# attempt to renew Let's Encrypt certificate every 12 hours
+#letsencrypt_cron_renew_schedule: "0 */12 * * *"
+
################
# Region options
################
diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml
index 8d0e7343a1..534b832991 100644
--- a/etc/kolla/passwords.yml
+++ b/etc/kolla/passwords.yml
@@ -9,6 +9,11 @@
rbd_secret_uuid:
cinder_rbd_secret_uuid:
+############
+# cASO
+############
+caso_keystone_password:
+
###################
# Database options
####################
@@ -205,6 +210,10 @@ neutron_ssh_key:
private_key:
public_key:
+haproxy_ssh_key:
+ private_key:
+ public_key:
+
####################
# Gnocchi options
####################
@@ -242,6 +251,9 @@ redis_master_password:
####################
prometheus_mysql_exporter_database_password:
prometheus_alertmanager_password:
+prometheus_password:
+prometheus_grafana_password:
+prometheus_bcrypt_salt:
###############################
# OpenStack identity federation
diff --git a/kolla_ansible/cmd/genpwd.py b/kolla_ansible/cmd/genpwd.py
index e523372e22..89b7bbd0ab 100755
--- a/kolla_ansible/cmd/genpwd.py
+++ b/kolla_ansible/cmd/genpwd.py
@@ -20,6 +20,7 @@
import string
import sys
+from ansible.utils.encrypt import random_salt
from cryptography import fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
@@ -56,7 +57,7 @@ def generate_RSA(bits=4096):
def genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
- fernet_keys, hmac_md5_keys):
+ fernet_keys, hmac_md5_keys, bcrypt_keys):
try:
with open(passwords_file, 'r') as f:
passwords = yaml.safe_load(f.read())
@@ -98,6 +99,11 @@ def genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
.hexdigest())
elif k in fernet_keys:
passwords[k] = fernet.Fernet.generate_key().decode()
+ elif k in bcrypt_keys:
+ # NOTE(wszusmki) To be compatible with the ansible
+ # password_hash filter, we use the utility function from the
+ # ansible library.
+ passwords[k] = random_salt(22)
else:
passwords[k] = ''.join([
random.SystemRandom().choice(
@@ -137,8 +143,9 @@ def main():
# SSH key pair
ssh_keys = ['kolla_ssh_key', 'nova_ssh_key',
- 'keystone_ssh_key', 'bifrost_ssh_key', 'octavia_amp_ssh_key',
- 'neutron_ssh_key']
+ 'keystone_ssh_key', 'bifrost_ssh_key',
+ 'octavia_amp_ssh_key', 'neutron_ssh_key',
+ 'haproxy_ssh_key']
# If these keys are None, leave them as None
blank_keys = ['docker_registry_password']
@@ -150,11 +157,14 @@ def main():
# Fernet keys
fernet_keys = ['barbican_crypto_key']
+ # bcrypt salts
+ bcrypt_keys = ['prometheus_bcrypt_salt']
+
# length of password
length = 40
genpwd(passwords_file, length, uuid_keys, ssh_keys, blank_keys,
- fernet_keys, hmac_md5_keys)
+ fernet_keys, hmac_md5_keys, bcrypt_keys)
if __name__ == '__main__':
diff --git a/kolla_ansible/kolla_url.py b/kolla_ansible/kolla_url.py
new file mode 100644
index 0000000000..a217491e6f
--- /dev/null
+++ b/kolla_ansible/kolla_url.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2022 StackHPC Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from kolla_ansible.put_address_in_context import put_address_in_context
+
+
+def kolla_url(fqdn, protocol, port, path='', context='url'):
+ """generates url
+
+ :param fqdn:
+ :param protocol: http, ws, https or wss
+ :param port: port (omits 80 on http and 443 on https in output)
+ :param path: path - optional
+ :returns: string with url
+ """
+
+ fqdn = put_address_in_context(fqdn, context)
+ port = int(port)
+
+ if ((protocol == 'http' and port == 80) or
+ (protocol == 'https' and port == 443) or
+ (protocol == 'ws' and port == 80) or
+ (protocol == 'wss' and port == 443)):
+ address = f"{protocol}://{fqdn}{path}"
+ else:
+ address = f"{protocol}://{fqdn}:{port}{path}"
+
+ return address
diff --git a/kolla_ansible/tests/unit/test_address_filters.py b/kolla_ansible/tests/unit/test_address_filters.py
index be2cee78d9..589531639a 100644
--- a/kolla_ansible/tests/unit/test_address_filters.py
+++ b/kolla_ansible/tests/unit/test_address_filters.py
@@ -20,6 +20,7 @@
from kolla_ansible.exception import FilterError
from kolla_ansible.kolla_address import kolla_address
+from kolla_ansible.kolla_url import kolla_url
from kolla_ansible.put_address_in_context import put_address_in_context
from kolla_ansible.tests.unit.helpers import _to_bool
@@ -323,3 +324,66 @@ def test_valid_ipv6_config_do_not_ignore_any_vip_address(self):
},
})
self.assertEqual(addr, kolla_address(context, 'api'))
+
+
+class TestKollaUrlFilter(unittest.TestCase):
+
+ def test_https_443_path(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 443
+ path = '/v2'
+ self.assertEqual("https://kolla.external/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_http_80_path(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 80
+ path = '/v2'
+ self.assertEqual("http://kolla.external/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_https_8443_path(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 8443
+ path = '/v2'
+ self.assertEqual("https://kolla.external:8443/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_http_8080_path(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 8080
+ path = '/v2'
+ self.assertEqual("http://kolla.external:8080/v2",
+ kolla_url(fqdn, protocol, port, path))
+
+ def test_https_443_nopath(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 443
+ self.assertEqual("https://kolla.external",
+ kolla_url(fqdn, protocol, port))
+
+ def test_http_80_nopath(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 80
+ self.assertEqual("http://kolla.external",
+ kolla_url(fqdn, protocol, port))
+
+ def test_https_8443_nopath(self):
+ protocol = 'https'
+ fqdn = 'kolla.external'
+ port = 8443
+ self.assertEqual("https://kolla.external:8443",
+ kolla_url(fqdn, protocol, port))
+
+ def test_http_8080_nopath(self):
+ protocol = 'http'
+ fqdn = 'kolla.external'
+ port = 8080
+ self.assertEqual("http://kolla.external:8080",
+ kolla_url(fqdn, protocol, port))
diff --git a/releasenotes/notes/add-ceph-metrics-scrape-interval-3ee39fba696860e9.yaml b/releasenotes/notes/add-ceph-metrics-scrape-interval-3ee39fba696860e9.yaml
new file mode 100644
index 0000000000..8e4d9eee75
--- /dev/null
+++ b/releasenotes/notes/add-ceph-metrics-scrape-interval-3ee39fba696860e9.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds a new variable for controlling Ceph's metrics scrape interval and
+ sets it to the recommended 15s value.
diff --git a/releasenotes/notes/add-haproxy-http2-support-3a8575889cabe064.yaml b/releasenotes/notes/add-haproxy-http2-support-3a8575889cabe064.yaml
new file mode 100644
index 0000000000..c188f3eaec
--- /dev/null
+++ b/releasenotes/notes/add-haproxy-http2-support-3a8575889cabe064.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Adds http/2 support to HAProxy frontends.
diff --git a/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml b/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml
new file mode 100644
index 0000000000..47687f627d
--- /dev/null
+++ b/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - Add Let's Encrypt TLS certificate service integration into OpenStack
+ deployment. Enables a trusted TLS certificate generation option for
+ secure communication with OpenStack HAProxy instances. Setting
+ ``letsencrypt_email`` and ``kolla_internal_fqdn`` and/or
+ ``kolla_external_fqdn`` is required. One container runs an Apache
+ ACME client webserver and one runs Lego for certificate retrieval
+ and renewal. The Lego container starts a cron job which attempts
+ to renew certificates every 12 hours.
diff --git a/releasenotes/notes/adds-node-exporter-targets-extra-c037d4755d1002e8.yaml b/releasenotes/notes/adds-node-exporter-targets-extra-c037d4755d1002e8.yaml
new file mode 100644
index 0000000000..39a25b042c
--- /dev/null
+++ b/releasenotes/notes/adds-node-exporter-targets-extra-c037d4755d1002e8.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds ``prometheus_node_exporter_targets_extra`` to add additional scrape
+ targets to the node exporter job. See :kolla-ansible-doc:`documentation
+ <reference/logging-and-monitoring/prometheus-guide.html>` for more
+ information.
diff --git a/releasenotes/notes/cloudkitty-prometheus-basic-auth-baa441852d57d121.yaml b/releasenotes/notes/cloudkitty-prometheus-basic-auth-baa441852d57d121.yaml
new file mode 100644
index 0000000000..0075fe83ae
--- /dev/null
+++ b/releasenotes/notes/cloudkitty-prometheus-basic-auth-baa441852d57d121.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes CloudKitty failing to query Prometheus now that basic authentication
+ is required.
diff --git a/releasenotes/notes/expose-prometheus-on-external-api-78d5fff60f6e75a5.yaml b/releasenotes/notes/expose-prometheus-on-external-api-78d5fff60f6e75a5.yaml
new file mode 100644
index 0000000000..d580e383a8
--- /dev/null
+++ b/releasenotes/notes/expose-prometheus-on-external-api-78d5fff60f6e75a5.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - |
+ Adds support for exposing Prometheus server on the external interface.
+ This is disabled by default and can be enabled by setting
+ ``enable_prometheus_server_external`` to ``true``. Basic auth is used to
+ protect the endpoint.
+ - |
+ Adds ``prometheus_external_fqdn`` and ``prometheus_internal_fqdn`` to
+ customise prometheus FQDNs.
+upgrade:
+ - |
+ Prometheus now uses basic auth. The password is under the key
+ ``prometheus_password`` in the Kolla passwords file. The username is
+ ``admin``. The default set of users can be changed using the variable:
+ ``prometheus_basic_auth_users``.
diff --git a/releasenotes/notes/fix-expose-prometheus-externally-with-single-frontend-45ca4e25c8948393.yaml b/releasenotes/notes/fix-expose-prometheus-externally-with-single-frontend-45ca4e25c8948393.yaml
new file mode 100644
index 0000000000..b720e60ed5
--- /dev/null
+++ b/releasenotes/notes/fix-expose-prometheus-externally-with-single-frontend-45ca4e25c8948393.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes an issue when using ``enable_prometheus_server_external`` in
+ conjunction with ``haproxy_single_external_frontend``.
diff --git a/releasenotes/notes/fix-prometheus-grafana-datasource-e11074378004b5ca.yaml b/releasenotes/notes/fix-prometheus-grafana-datasource-e11074378004b5ca.yaml
new file mode 100644
index 0000000000..2d0007660e
--- /dev/null
+++ b/releasenotes/notes/fix-prometheus-grafana-datasource-e11074378004b5ca.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fixes prometheus grafana datasource using incorrect basic auth credentials.
diff --git a/releasenotes/notes/fixes-issue-with-prometheus-scraping-itself-308fa19734fd0939.yaml b/releasenotes/notes/fixes-issue-with-prometheus-scraping-itself-308fa19734fd0939.yaml
new file mode 100644
index 0000000000..3bd26a09f2
--- /dev/null
+++ b/releasenotes/notes/fixes-issue-with-prometheus-scraping-itself-308fa19734fd0939.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes an issue with prometheus scraping itself now that basic auth has
+ been enabled.
diff --git a/releasenotes/notes/friendly-hostnames-1bb1254b2b434a11.yaml b/releasenotes/notes/friendly-hostnames-1bb1254b2b434a11.yaml
new file mode 100644
index 0000000000..153b8c4652
--- /dev/null
+++ b/releasenotes/notes/friendly-hostnames-1bb1254b2b434a11.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Adds the ability for the instance label on prometheus metrics to be
+ replaced with the inventory hostname as opposed to using the ip address as
+ the metric label. The ip address is still used as the target address which
+ means that there is no issue of the hostname being unresolvable.
+
+ More information on how to use this feature can be found in the
+ reference documentation for logging and monitoring.
diff --git a/releasenotes/notes/grafana-openseach-add-log-level-d64e304977d4f550.yaml b/releasenotes/notes/grafana-openseach-add-log-level-d64e304977d4f550.yaml
new file mode 100644
index 0000000000..ae25c2346f
--- /dev/null
+++ b/releasenotes/notes/grafana-openseach-add-log-level-d64e304977d4f550.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Configures the log level field for the Grafana OpenSearch datasource. This
+ allows for logs to be coloured based on log level. To apply this you need
+ to delete the datasource and reconfigure grafana.
diff --git a/releasenotes/notes/grafana-opensearch-datasource-configuration-04202c059f1abd05.yaml b/releasenotes/notes/grafana-opensearch-datasource-configuration-04202c059f1abd05.yaml
new file mode 100644
index 0000000000..4b966b144a
--- /dev/null
+++ b/releasenotes/notes/grafana-opensearch-datasource-configuration-04202c059f1abd05.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Updates the default Grafana OpenSearch datasource configuration to use
+ values for OpenSearch that work out of the box. Replaces the Elasticsearch
+ values that were previously being used. The new configuration can be
+ applied by deleting your datasource and reconfiguring Grafana through kolla
+ ansible. In order to prevent dashboards from breaking when the datasource
+ is deleted, one should use `datasource variables
+ <https://grafana.com/docs/grafana/latest/dashboards/variables/>`__
+ in Grafana. See bug `2039500 <https://bugs.launchpad.net/kolla-ansible/+bug/2039500>`__.
diff --git a/releasenotes/notes/grafana-volume-81d569128d9e020f.yaml b/releasenotes/notes/grafana-volume-81d569128d9e020f.yaml
new file mode 100644
index 0000000000..f449fc34f9
--- /dev/null
+++ b/releasenotes/notes/grafana-volume-81d569128d9e020f.yaml
@@ -0,0 +1,23 @@
+---
+fixes:
+ - |
+ Fixes bug `#2039498
+ <https://bugs.launchpad.net/kolla-ansible/+bug/2039498>`__ where the
+ grafana docker volume was bind mounted over Grafana plugins installed at
+ image build time. This is fixed by copying the dashboards into the
+ container from an existing bind mount instead of using the ``grafana``
+ volume. This however leaves behind the volume which can be removed by
+ setting ``grafana_remove_old_volume`` to ``true``. Please note that any
+ plugins installed via the cli directly and not through kolla will be lost
+ when doing this. In a future release ``grafana_remove_old_volume`` will
+ default to ``true``.
+upgrade:
+ - |
+ The ``grafana`` volume is no longer used. If you wish to automatically
+ remove the old volume, set ``grafana_remove_old_volume`` to ``true``. Note
+ that doing this will lose any plugins installed via the cli directly and
+ not through kolla. If you have previously installed Grafana plugins via the
+ Grafana UI, or CLI, you must change to installing them at image `build time
+ <https://docs.openstack.org/kolla/latest/admin/image-building.html>`__.
+ The grafana volume, which will contain existing custom plugins, will be
+ automatically removed in the D release.
diff --git a/releasenotes/notes/haproxy-single-external-frontend-7dadd1fff8a8dfbd.yaml b/releasenotes/notes/haproxy-single-external-frontend-7dadd1fff8a8dfbd.yaml
new file mode 100644
index 0000000000..a91c08eb06
--- /dev/null
+++ b/releasenotes/notes/haproxy-single-external-frontend-7dadd1fff8a8dfbd.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds single service external frontend feature to haproxy.
+ Details are in the
+ `haproxy guide <https://docs.openstack.org/kolla-ansible/latest/reference/high-availability/haproxy-guide.html>`_
+ section of the documentation.
diff --git a/releasenotes/notes/magnum-kubeconfig-71934a2980c7e74f.yaml b/releasenotes/notes/magnum-kubeconfig-71934a2980c7e74f.yaml
new file mode 100644
index 0000000000..80155f498b
--- /dev/null
+++ b/releasenotes/notes/magnum-kubeconfig-71934a2980c7e74f.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds support for copying in ``{{ node_custom_config }}/magnum/kubeconfig``
+ to Magnum containers for ``magnum-cluster-api`` driver.
diff --git a/releasenotes/notes/neutron-physical-networks-5b908bed9809c3b4.yaml b/releasenotes/notes/neutron-physical-networks-5b908bed9809c3b4.yaml
new file mode 100644
index 0000000000..a62e400089
--- /dev/null
+++ b/releasenotes/notes/neutron-physical-networks-5b908bed9809c3b4.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Adds a ``neutron_physical_networks`` variable for customising Neutron
+ physical network names. The default behaviour of using ``physnet1`` to
+ ``physnetN`` is unchanged.
diff --git a/releasenotes/notes/octavia-check-certificate-expiry-9a80a68cf31cbba4.yaml b/releasenotes/notes/octavia-check-certificate-expiry-9a80a68cf31cbba4.yaml
new file mode 100644
index 0000000000..46f832abde
--- /dev/null
+++ b/releasenotes/notes/octavia-check-certificate-expiry-9a80a68cf31cbba4.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ The flag ``--check-expiry`` has been added to the ``octavia-certificates``
+ command. ``kolla-ansible octavia-certificates --check-expiry <days>`` will
+ check if the Octavia certificates are set to expire within a given number
+ of days.
diff --git a/requirements.txt b/requirements.txt
index 59147c1bd1..dcbbd1f10c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,3 +18,7 @@ jmespath>=0.9.3 # MIT
# Hashicorp Vault
hvac>=0.10.1
+
+# Password hashing
+bcrypt>=3.0.0 # Apache-2.0
+passlib[bcrypt]>=1.0.0 # BSD
diff --git a/tests/deploy.sh b/tests/deploy.sh
index 7405d43a30..ca419c9dfa 100755
--- a/tests/deploy.sh
+++ b/tests/deploy.sh
@@ -6,18 +6,63 @@ set -o errexit
# Enable unbuffered output for Ansible in Jenkins.
export PYTHONUNBUFFERED=1
+function init_pebble {
-function deploy {
- RAW_INVENTORY=/etc/kolla/inventory
+ sudo echo "[i] Pulling letsencrypt/pebble" > /tmp/logs/ansible/certificates
+ sudo docker pull letsencrypt/pebble &>> /tmp/logs/ansible/certificates
+
+ sudo echo "[i] Force removing old pebble container" &>> /tmp/logs/ansible/certificates
+ sudo docker rm -f pebble &>> /tmp/logs/ansible/certificates
+
+ sudo echo "[i] Run new pebble container" &>> /tmp/logs/ansible/certificates
+ sudo docker run --name pebble --rm -d -e "PEBBLE_VA_NOSLEEP=1" -e "PEBBLE_VA_ALWAYS_VALID=1" --net=host letsencrypt/pebble &>> /tmp/logs/ansible/certificates
+
+ sudo echo "[i] Wait for pebble container be up" &>> /tmp/logs/ansible/certificates
+ # wait until pebble starts
+ while ! sudo docker logs pebble | grep -q "Listening on"; do
+ sleep 1
+ done
+ sudo echo "[i] Wait for pebble container done" &>> /tmp/logs/ansible/certificates
+
+ sudo echo "[i] Pebble container logs" &>> /tmp/logs/ansible/certificates
+ sudo docker logs pebble &>> /tmp/logs/ansible/certificates
+}
+
+function pebble_cacert {
+ sudo docker cp pebble:/test/certs/pebble.minica.pem /etc/kolla/certificates/ca/pebble-root.crt
+ sudo curl -k -s -o /etc/kolla/certificates/ca/pebble.crt -v https://127.0.0.1:15000/roots/0
+}
+
+function certificates {
+
+ RAW_INVENTORY=/etc/kolla/inventory
source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
- #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. Probably we need to include become there
- sudo chmod -R 777 /etc/kolla
# generate self-signed certificates for the optional internal TLS tests
if [[ "$TLS_ENABLED" = "True" ]]; then
kolla-ansible -i ${RAW_INVENTORY} -vvv certificates > /tmp/logs/ansible/certificates
fi
+ if [[ "$LE_ENABLED" = "True" ]]; then
+ init_pebble
+ pebble_cacert
+ fi
+
+ #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. Probably we need to include become there
+ sudo chmod -R 777 /etc/kolla
+}
+
+
+function deploy {
+
+ RAW_INVENTORY=/etc/kolla/inventory
+ source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
+
+ #TODO(inc0): Post-deploy complains that /etc/kolla is not writable. Probably we need to include become there
+ sudo chmod -R 777 /etc/kolla
+
+ certificates
+
# Actually do the deployment
kolla-ansible -i ${RAW_INVENTORY} -vvv prechecks &> /tmp/logs/ansible/deploy-prechecks
kolla-ansible -i ${RAW_INVENTORY} -vvv pull &> /tmp/logs/ansible/pull
diff --git a/tests/j2lint.py b/tests/j2lint.py
index 65189c38f9..d9cc6e8bf9 100755
--- a/tests/j2lint.py
+++ b/tests/j2lint.py
@@ -23,6 +23,7 @@
Adapted for OpenStack Kolla/Kolla-Ansible purposes
"""
+from ansible.plugins.filter.core import get_encrypted_password
from ansible.plugins.filter.core import to_json
from functools import reduce
from jinja2 import BaseLoader
@@ -51,6 +52,9 @@ def check(template, out, err, env=Environment(loader=AbsolutePathLoader(),
env.filters['bool'] = bool
env.filters['hash'] = hash
env.filters['to_json'] = to_json
+ # NOTE(wszumski): password_hash is mapped to the function:
+ # get_encrypted_password in ansible.filters.core.
+ env.filters['password_hash'] = get_encrypted_password
env.filters['kolla_address'] = kolla_address
env.filters['put_address_in_context'] = put_address_in_context
env.get_template(template)
diff --git a/tests/run-hashi-vault.yml b/tests/run-hashi-vault.yml
index 2fa79663bc..cbeb4aa157 100644
--- a/tests/run-hashi-vault.yml
+++ b/tests/run-hashi-vault.yml
@@ -36,10 +36,12 @@
- name: install kolla-ansible and dependencies
pip:
- name:
- - "{{ kolla_ansible_src_dir }}"
executable: "pip3"
extra_args: "-c {{ upper_constraints_file }} --user"
+ name:
+ - "{{ kolla_ansible_src_dir }}"
+ - "ansible-core{{ ansible_core_version_constraint }}"
+ - "ansible{{ ansible_version_constraint }}"
- name: copy passwords.yml file
copy:
diff --git a/tests/run.yml b/tests/run.yml
index e1b75c3fa0..c70e7ca724 100644
--- a/tests/run.yml
+++ b/tests/run.yml
@@ -21,11 +21,12 @@
need_build_image: "{{ kolla_build_images | default(false) }}"
build_image_tag: "change_{{ zuul.change | default('none') }}"
openstack_core_enabled: "{{ openstack_core_enabled }}"
- openstack_core_tested: "{{ scenario in ['core', 'cephadm', 'zun', 'cells', 'swift', 'ovn'] }}"
+ openstack_core_tested: "{{ scenario in ['core', 'cephadm', 'zun', 'cells', 'swift', 'ovn', 'lets-encrypt'] }}"
dashboard_enabled: "{{ openstack_core_enabled }}"
upper_constraints_file: "{{ ansible_env.HOME }}/src/opendev.org/openstack/requirements/upper-constraints.txt"
docker_image_tag_suffix: "{{ '-aarch64' if ansible_architecture == 'aarch64' else '' }}"
kolla_ansible_venv_path: "{{ ansible_env.HOME }}/kolla-ansible-venv"
+ kolla_internal_fqdn: "kolla.example.com"
- name: Install dig for Designate testing
become: true
@@ -46,6 +47,18 @@
vars:
disk_type: "{{ 'ceph-lvm' if scenario in ['cephadm'] else scenario }}"
+ - name: Update /etc/hosts with internal API FQDN
+ blockinfile:
+ dest: /etc/hosts
+ marker: "# {mark} ANSIBLE GENERATED INTERNAL API FQDN"
+ block: |
+ {{ kolla_internal_vip_address }} {{ kolla_internal_fqdn }}
+ 192.0.2.1 pebble
+ become: True
+ when:
+ - scenario == "lets-encrypt"
+
+
- hosts: primary
any_errors_fatal: true
vars:
@@ -397,6 +410,7 @@
chdir: "{{ kolla_ansible_src_dir }}"
environment:
TLS_ENABLED: "{{ tls_enabled }}"
+ LE_ENABLED: "{{ le_enabled }}"
KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
HAS_UPGRADE: "{{ is_upgrade | bool | ternary('yes', 'no') }}"
@@ -410,6 +424,7 @@
chdir: "{{ kolla_ansible_src_dir }}"
environment:
TLS_ENABLED: "{{ tls_enabled }}"
+ LE_ENABLED: "{{ le_enabled }}"
when: dashboard_enabled
- name: Run init-core-openstack.sh script
@@ -503,6 +518,8 @@
executable: /bin/bash
chdir: "{{ kolla_ansible_src_dir }}"
when: scenario == "octavia"
+ environment:
+ KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}"
- name: Run test-masakari.sh script
script:
diff --git a/tests/setup_gate.sh b/tests/setup_gate.sh
index 5481b8862e..976fc8aafc 100755
--- a/tests/setup_gate.sh
+++ b/tests/setup_gate.sh
@@ -94,6 +94,10 @@ function prepare_images {
GATE_IMAGES="^cron,^fluentd,^haproxy,^keepalived,^kolla-toolbox,^mariadb"
fi
+ if [[ $SCENARIO == "lets-encrypt" ]]; then
+ GATE_IMAGES+=",^letsencrypt,^haproxy"
+ fi
+
if [[ $SCENARIO == "prometheus-opensearch" ]]; then
GATE_IMAGES="^cron,^fluentd,^grafana,^haproxy,^keepalived,^kolla-toolbox,^mariadb,^memcached,^opensearch,^prometheus,^rabbitmq"
fi
diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2
index d60b8edbce..16bc2d16fc 100644
--- a/tests/templates/globals-default.j2
+++ b/tests/templates/globals-default.j2
@@ -213,3 +213,13 @@ keepalived_track_script_enabled: "no"
neutron_modules_extra:
- name: 'nf_conntrack_tftp'
- name: 'nf_nat_tftp'
+
+{% if scenario == "lets-encrypt" %}
+enable_letsencrypt: "yes"
+rabbitmq_enable_tls: "yes"
+letsencrypt_email: "usero@openstack.test"
+letsencrypt_cert_server: "https://pebble:14000/dir"
+kolla_internal_fqdn: "{{ kolla_internal_fqdn }}"
+kolla_enable_tls_backend: "no"
+kolla_admin_openrc_cacert: "{% raw %}{{ kolla_certificates_dir }}{% endraw %}/ca/pebble.crt"
+{% endif %}
diff --git a/tests/templates/inventory.j2 b/tests/templates/inventory.j2
index 43d83666ad..93891e2337 100644
--- a/tests/templates/inventory.j2
+++ b/tests/templates/inventory.j2
@@ -261,6 +261,9 @@ control
[venus:children]
monitoring
+[letsencrypt:children]
+loadbalancer
+
# Additional control implemented here. These groups allow you to control which
# services run on which hosts at a per-service level.
#
@@ -752,3 +755,9 @@ venus
[venus-manager:children]
venus
+
+[letsencrypt-webserver:children]
+letsencrypt
+
+[letsencrypt-lego:children]
+letsencrypt
diff --git a/tests/test-octavia.sh b/tests/test-octavia.sh
index 4ba3457a2d..7f61093ce9 100644
--- a/tests/test-octavia.sh
+++ b/tests/test-octavia.sh
@@ -8,6 +8,12 @@ set -o errexit
# Enable unbuffered output for Ansible in Jenkins.
export PYTHONUNBUFFERED=1
+function check_certificate_expiry {
+ RAW_INVENTORY=/etc/kolla/inventory
+ source $KOLLA_ANSIBLE_VENV_PATH/bin/activate
+ kolla-ansible octavia-certificates --check-expiry 7
+ deactivate
+}
function register_amphora_image {
amphora_url=https://tarballs.opendev.org/openstack/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2
@@ -79,6 +85,9 @@ function test_octavia {
}
function test_octavia_logged {
+ # Check if any certs expire within a week.
+ check_certificate_expiry
+
. /etc/kolla/admin-openrc.sh
. ~/openstackclient-venv/bin/activate
test_octavia
diff --git a/tests/test-prometheus-opensearch.sh b/tests/test-prometheus-opensearch.sh
index 9ce8a76636..c1f8272c16 100755
--- a/tests/test-prometheus-opensearch.sh
+++ b/tests/test-prometheus-opensearch.sh
@@ -79,11 +79,14 @@ function check_prometheus {
# Query prometheus graph, and check that the returned page looks like a
# prometheus page.
PROMETHEUS_URL=${OS_AUTH_URL%:*}:9091/graph
+ prometheus_password=$(awk '$1 == "prometheus_password:" { print $2 }' /etc/kolla/passwords.yml)
output_path=$1
args=(
--include
--location
--fail
+ --user
+ admin:$prometheus_password
)
if [[ "$TLS_ENABLED" = "True" ]]; then
args+=(--cacert $OS_CACERT)
diff --git a/tools/kolla-ansible b/tools/kolla-ansible
index e171ccf90e..1a2eef054c 100755
--- a/tools/kolla-ansible
+++ b/tools/kolla-ansible
@@ -199,6 +199,7 @@ Commands:
stop Stop Kolla containers
certificates Generate self-signed certificate for TLS *For Development Only*
octavia-certificates Generate certificates for octavia deployment
+ --check-expiry to check if certificates expire within that many days
upgrade Upgrades existing OpenStack Environment
upgrade-bifrost Upgrades an existing bifrost container
genconfig Generate configuration files for enabled OpenStack services
@@ -267,7 +268,7 @@ function version {
check_environment_coherence
SHORT_OPTS="hi:p:t:k:e:CD:v"
-LONG_OPTS="help,version,inventory:,playbook:,skip-tags:,tags:,key:,extra:,check,diff,verbose,configdir:,passwords:,limit:,forks:,vault-id:,ask-vault-pass,vault-password-file:,yes-i-really-really-mean-it,include-images,include-dev:,full,incremental"
+LONG_OPTS="help,version,inventory:,playbook:,skip-tags:,tags:,key:,extra:,check,diff,verbose,configdir:,passwords:,limit:,forks:,vault-id:,ask-vault-pass,vault-password-file:,yes-i-really-really-mean-it,include-images,include-dev:,full,incremental,check-expiry:"
RAW_ARGS="$*"
ARGS=$(getopt -o "${SHORT_OPTS}" -l "${LONG_OPTS}" --name "$0" -- "$@") || { usage >&2; exit 2; }
@@ -285,6 +286,7 @@ DANGER_CONFIRM=
INCLUDE_IMAGES=
INCLUDE_DEV=
BACKUP_TYPE="full"
+OCTAVIA_CERTS_EXPIRY=
# Serial is not recommended and disabled by default. Users can enable it by
# configuring ANSIBLE_SERIAL variable.
ANSIBLE_SERIAL=${ANSIBLE_SERIAL:-0}
@@ -402,6 +404,11 @@ while [ "$#" -gt 0 ]; do
shift 1
;;
+ (--check-expiry)
+ OCTAVIA_CERTS_EXPIRY="$2"
+ shift 2
+ ;;
+
(--version)
version
exit 0
@@ -536,6 +543,9 @@ EOF
(octavia-certificates)
ACTION="Generate octavia Certificates"
PLAYBOOK="${BASEDIR}/ansible/octavia-certificates.yml"
+ if [[ ! -z "${OCTAVIA_CERTS_EXPIRY}" ]]; then
+ EXTRA_OPTS="$EXTRA_OPTS -e octavia_certs_check_expiry=yes -e octavia_certs_expiry_limit=${OCTAVIA_CERTS_EXPIRY}"
+ fi
;;
(genconfig)
ACTION="Generate configuration files for enabled OpenStack services"
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 2f5fa56407..aef6c91d22 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -47,6 +47,7 @@
neutron_tenant_network_prefix_length: "24"
neutron_tenant_network_dns_server: "8.8.8.8"
tls_enabled: false
+ le_enabled: false
configure_swap_size: 0
roles:
- zuul: zuul/zuul-jobs
@@ -236,6 +237,7 @@
- job:
name: kolla-ansible-hashi-vault-base
+ parent: kolla-ansible-variables
run: tests/run-hashi-vault.yml
required-projects:
- openstack/kolla-ansible
@@ -245,3 +247,18 @@
- ^kolla_ansible/
- ^tests/run-hashi-vault.yml
- ^tests/test-hashicorp-vault-passwords.sh
+
+- job:
+ name: kolla-ansible-lets-encrypt-base
+ parent: kolla-ansible-base
+ voting: false
+ files:
+ - ^ansible/roles/letsencrypt/
+ - ^ansible/roles/loadbalancer/
+ - ^tests/test-core-openstack.sh
+ - ^tests/test-dashboard.sh
+ - ^tests/deploy.sh
+ vars:
+ scenario: lets-encrypt
+ tls_enabled: true
+ le_enabled: true
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index d2978ee017..352e74c936 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -398,6 +398,22 @@
vars:
base_distro: ubuntu
+- job:
+ name: kolla-ansible-ubuntu-lets-encrypt
+ parent: kolla-ansible-lets-encrypt-base
+ nodeset: kolla-ansible-jammy-multi
+ vars:
+ base_distro: ubuntu
+ install_type: source
+
+- job:
+ name: kolla-ansible-rocky9-lets-encrypt
+ parent: kolla-ansible-lets-encrypt-base
+ nodeset: kolla-ansible-rocky9-multi
+ vars:
+ base_distro: rocky
+ install_type: source
+
- job:
name: kolla-ansible-rocky9-prometheus-opensearch
parent: kolla-ansible-prometheus-opensearch-base
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index b00865c3b1..a1eaf85807 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -64,6 +64,8 @@
- kolla-ansible-rocky9-upgrade-cephadm
- kolla-ansible-ubuntu-upgrade-cephadm
- kolla-ansible-rocky9-hashi-vault
+ - kolla-ansible-ubuntu-lets-encrypt
+ - kolla-ansible-rocky9-lets-encrypt
check-arm64:
jobs:
- kolla-ansible-debian-bullseye-aarch64