Merge pull request #658 from kube-hetzner/staging
Further tweaks to placement group
mysticaltech authored Mar 16, 2023
2 parents f018ac4 + 4b14123 commit 9bc31b9
Showing 3 changed files with 6 additions and 6 deletions.
agents.tf: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ module "agents" {
ssh_private_key = var.ssh_private_key
ssh_additional_public_keys = length(var.ssh_hcloud_key_label) > 0 ? concat(var.ssh_additional_public_keys, data.hcloud_ssh_keys.keys_by_selector[0].ssh_keys.*.public_key) : var.ssh_additional_public_keys
firewall_ids = [hcloud_firewall.k3s.id]
- placement_group_id = var.placement_group_disable ? null : hcloud_placement_group.agent[floor((index(keys(local.agent_nodes), each.key) + 1) / 10)].id
+ placement_group_id = var.placement_group_disable ? null : hcloud_placement_group.agent[floor(index(keys(local.agent_nodes), each.key) / 10)].id
location = each.value.location
server_type = each.value.server_type
backups = each.value.backups
control_planes.tf: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ module "control_planes" {
ssh_private_key = var.ssh_private_key
ssh_additional_public_keys = length(var.ssh_hcloud_key_label) > 0 ? concat(var.ssh_additional_public_keys, data.hcloud_ssh_keys.keys_by_selector[0].ssh_keys.*.public_key) : var.ssh_additional_public_keys
firewall_ids = [hcloud_firewall.k3s.id]
- placement_group_id = var.placement_group_disable ? null : hcloud_placement_group.control_plane[floor((index(keys(local.control_plane_nodes), each.key) + 1) / 10)].id
+ placement_group_id = var.placement_group_disable ? null : hcloud_placement_group.control_plane[floor(index(keys(local.control_plane_nodes), each.key) / 10)].id
location = each.value.location
server_type = each.value.server_type
backups = each.value.backups
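
Both placement-group hunks make the same change: the group index is now floor(index / 10) instead of floor((index + 1) / 10), so nodes fill each group in clean batches of ten (positions 0-9 go to group 0, 10-19 to group 1, and so on) instead of the first group stopping at nine; with the old + 1 offset, the tenth node (position 9) would already point at group index 1. Below is a rough standalone sketch of that grouping logic; the node map, count formula, and resource shape are hypothetical stand-ins, not the module's actual code.

# Sketch only: hypothetical standalone example of the batching logic above.
# Assumes a configured hcloud provider; names and node count are made up.
locals {
  agent_nodes = { for i in range(25) : "agent-${i}" => i } # pretend 25 agent nodes
  # Hetzner "spread" placement groups hold at most 10 servers, so one group per batch of 10.
  agent_placement_group_count = ceil(length(local.agent_nodes) / 10)
}

resource "hcloud_placement_group" "agent" {
  count = local.agent_placement_group_count
  name  = "agent-${count.index}"
  type  = "spread"
}

# The fixed expression: position 0-9 -> group 0, 10-19 -> group 1, 20-24 -> group 2.
output "node_to_group_index" {
  value = {
    for k in keys(local.agent_nodes) :
    k => floor(index(keys(local.agent_nodes), k) / 10)
  }
}
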
init.tf: 4 additions & 4 deletions
@@ -286,7 +286,7 @@ resource "null_resource" "kustomization" {
# Wait for k3s to become ready (we check one more time) because in some edge cases,
# the cluster can become unavailable for a few seconds at this very instant.
<<-EOT
- timeout 180 bash <<EOF
+ timeout 360 bash <<EOF
until [[ "\$(kubectl get --raw='/readyz' 2> /dev/null)" == "ok" ]]; do
echo "Waiting for the cluster to become ready..."
sleep 2
@@ -300,13 +300,13 @@ resource "null_resource" "kustomization" {
# Ready, set, go for the kustomization
"kubectl apply -k /var/post_install",
"echo 'Waiting for the system-upgrade-controller deployment to become available...'",
"kubectl -n system-upgrade wait --for=condition=available --timeout=180s deployment/system-upgrade-controller",
"sleep 5", # important as the system upgrade controller CRDs sometimes don't get ready right away, especially with Cilium.
"kubectl -n system-upgrade wait --for=condition=available --timeout=360s deployment/system-upgrade-controller",
"sleep 7", # important as the system upgrade controller CRDs sometimes don't get ready right away, especially with Cilium.
"kubectl -n system-upgrade apply -f /var/post_install/plans.yaml"
],
local.has_external_load_balancer ? [] : [
<<-EOT
- timeout 360 bash <<EOF
+ timeout 360 bash <<EOF
until [ -n "\$(kubectl get -n ${lookup(local.ingress_controller_namespace_names, local.ingress_controller)} service/${lookup(local.ingress_controller_service_names, local.ingress_controller)} --output=jsonpath='{.status.loadBalancer.ingress[0].${var.lb_hostname != "" ? "hostname" : "ip"}}' 2> /dev/null)" ]; do
echo "Waiting for load-balancer to get an IP..."
sleep 2
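
The hunk above is truncated before the heredoc closes, so for context here is a rough self-contained sketch of the same readiness wait with the raised 360-second timeout. It uses a hypothetical null_resource with local-exec purely for illustration; the real module runs these commands as part of its own provisioning flow, and kubectl access to the cluster is assumed.

# Sketch only: hypothetical stand-in for the timeout-wrapped readiness poll.
resource "null_resource" "wait_for_readyz_example" {
  provisioner "local-exec" {
    command = <<-EOT
      timeout 360 bash -c 'until [[ "$(kubectl get --raw=/readyz 2> /dev/null)" == "ok" ]]; do
        echo "Waiting for the cluster to become ready..."
        sleep 2
      done'
    EOT
  }
}
# If the API server never answers "ok", timeout exits non-zero after 360 seconds
# and the provisioner (and the apply) fails, matching the intent of the bumped value.
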
