From bc6e625bc35b54705415dafce7b4fd30a2406222 Mon Sep 17 00:00:00 2001
From: gizmo-rt
Date: Mon, 19 Jan 2026 11:27:02 +0530
Subject: [PATCH] Revert "Adds linter and fixes issue (#338)"

This reverts commit f8779dbac0b8ebb408e70e956ee3462642275e0f.
---
 .tflint.hcl | 21 -
 account-setup/aws/main.tf | 92 +-
 account-setup/aws/outputs.tf | 2 +-
 github/aws/teams.tf | 71 +-
 github/aws/vars.tf | 22 +-
 github/azure/teams.tf | 71 +-
 github/azure/vars.tf | 16 +-
 github/gcp/teams.tf | 71 +-
 github/gcp/vars.tf | 22 +-
 gke/cluster.tf | 42 +-
 hosted-zones/aws/vars.tf | 21 +-
 hosted-zones/azure/vars.tf | 23 +-
 hosted-zones/oci/vars.tf | 19 +-
 k8s/aws/auth/kubernetes.tf | 24 +-
 k8s/aws/eks/appdynamics.tf | 2 +-
 k8s/aws/eks/cert-manager.tf | 58 +-
 k8s/aws/eks/domain.tf | 6 +-
 k8s/aws/eks/fluentbit.tf | 86 +-
 k8s/aws/eks/grafana.tf | 112 +--
 k8s/aws/eks/kafka.tf | 6 +-
 k8s/aws/eks/main.tf | 64 +-
 k8s/aws/eks/observability.tf | 68 +-
 k8s/aws/eks/outputs.tf | 26 +-
 k8s/aws/eks/prometheus.tf | 108 +--
 k8s/aws/eks/vars.tf | 431 ++++++----
 k8s/aws/namespace/badger-db.tf | 4 +-
 k8s/aws/namespace/db.tf | 53 +-
 k8s/aws/namespace/kubernetes.tf | 27 +-
 k8s/aws/namespace/nginx.tf | 92 +-
 k8s/aws/namespace/outputs.tf | 36 +-
 k8s/aws/namespace/secrets.tf | 54 +-
 k8s/aws/namespace/vars.tf | 252 +++---
 k8s/aws/nginx/vars.tf | 22 +
 k8s/azure/aad/main.tf | 24 +-
 k8s/azure/aks/fluentbit.tf | 80 +-
 k8s/azure/aks/grafana-dashboard.tf | 53 +-
 k8s/azure/aks/grafana.tf | 113 +--
 k8s/azure/aks/observability.tf | 2 +-
 k8s/azure/aks/outputs.tf | 30 +-
 k8s/azure/aks/prometheus.tf | 121 +--
 k8s/azure/aks/vars.tf | 333 +++----
 k8s/azure/namespace/badger-db.tf | 4 +-
 k8s/azure/namespace/configmap.tf | 47 +-
 k8s/azure/namespace/kubernetes.tf | 44 +-
 k8s/azure/namespace/nginx.tf | 90 +-
 k8s/azure/namespace/sql.tf | 159 ++--
 k8s/azure/namespace/vars.tf | 272 +++---
 k8s/azure/nginx/vars.tf | 10 +-
 k8s/gcp/gke/appdynamics.tf | 2 +-
 k8s/gcp/gke/fluentbit.tf | 72 +-
 k8s/gcp/gke/grafana-dashboard.tf | 63 +-
 k8s/gcp/gke/grafana.tf | 190 ++--
 k8s/gcp/gke/helm.tf | 10 +-
 k8s/gcp/gke/kubernetes.tf | 4 +-
 k8s/gcp/gke/main.tf | 96 ++-
 k8s/gcp/gke/nginx.tf | 6 +-
 k8s/gcp/gke/prometheus.tf | 112 +--
 k8s/gcp/gke/vars.tf | 1288 ++++++++++++++--------------
 k8s/gcp/namespace/badger-db.tf | 4 +-
 k8s/gcp/namespace/kubernetes.tf | 18 +-
 k8s/gcp/namespace/nginx.tf | 160 ++--
 k8s/gcp/namespace/sql.tf | 34 +-
 k8s/gcp/namespace/vars.tf | 248 +++---
 k8s/gcp/nginx/vars.tf | 26 +-
 k8s/oci/namespace/badger-db.tf | 4 +-
 k8s/oci/namespace/main.tf | 22 +-
 k8s/oci/namespace/nginx.tf | 98 +--
 k8s/oci/namespace/secrets.tf | 38 +-
 k8s/oci/namespace/vars.tf | 212 ++---
 k8s/oci/nginx/vars.tf | 6 +-
 k8s/oci/oke/fluentbit.tf | 80 +-
 k8s/oci/oke/grafana-dashboard.tf | 53 +-
 k8s/oci/oke/grafana.tf | 89 +-
 k8s/oci/oke/main.tf | 55 +-
 k8s/oci/oke/prometheus.tf | 112 +--
 kafka/aws-msk/main.tf | 2 +-
 kafka/aws-msk/output.tf | 6 +-
 kafka/aws-msk/vars.tf | 15 +
 kops-kube/aws/kubernetes.tf | 26 +-
 kops-kube/azure/kubernetes.tf | 44 +-
 kops-kube/gcp/kubernetes.tf | 20 +-
 object-storage/aws/vars.tf | 5 +
 observability/aws/issuer.tf | 4 +-
 observability/aws/main.tf | 44 +-
 observability/aws/vars.tf | 162 ++--
 observability/azure/issuer.tf | 4 +-
 observability/azure/main.tf | 36 +-
 observability/azure/vars.tf | 155 ++--
 observability/gcp/issuer.tf | 4 +-
 observability/gcp/main.tf | 44 +-
 observability/gcp/vars.tf | 190 ++--
 observability/oci/issuer.tf | 4 +-
 observability/oci/main.tf | 44 +-
 observability/oci/vars.tf | 156 ++--
 redis/aws-elasticache/main.tf | 99 +--
 redis/aws-elasticache/outputs.tf | 22 +-
 redis/azure-redis/main.tf | 45 +-
 redis/azure-redis/vars.tf | 46 +-
 redis/gcp-redis/main.tf | 94 +-
 redis/gcp-redis/outputs.tf | 14 +-
 redis/oci-redis/kubernetes.tf | 32 +-
 redis/oci-redis/vars.tf | 52 +-
 sql/aws-rds/main.tf | 49 +-
 sql/aws-rds/outputs.tf | 4 +-
 sql/aws-rds/vars.tf | 12 +
 sql/azure-mysql/vars.tf | 14 +-
 sql/azure-postgres/vars.tf | 14 +-
 sql/gcp-sql/main.tf | 140 +--
 sql/gcp-sql/outputs.tf | 4 +-
 sql/gcp-sql/vars.tf | 42 +-
 sql/oci-mysql/secrets.tf | 85 +-
 sql/oci-mysql/vars.tf | 26 +-
 sql/oci-postgres/secrets.tf | 85 +-
 sql/oci-postgres/vars.tf | 26 +-
 zop-system/aws/kubernetes.tf | 6 +-
 zop-system/azure/kubernetes.tf | 26 +-
 zop-system/gcp/kubernetes.tf | 24 +-
 117 files changed, 4543 insertions(+), 3891 deletions(-)
 delete mode 100644 .tflint.hcl

diff --git a/.tflint.hcl b/.tflint.hcl
deleted file mode 100644
index 53f0bc27..00000000
--- a/.tflint.hcl
+++ /dev/null
@@ -1,21 +0,0 @@
-plugin "terraform" {
-  enabled = true
-  preset = "recommended"
-}
-
-plugin "aws" {
-  enabled = true
-  version = "0.35.0"
-  source = "github.com/terraform-linters/tflint-ruleset-aws"
-}
-
-plugin "google" {
-  enabled = true
-  version = "0.26.0"
-  source = "github.com/terraform-linters/tflint-ruleset-google"
-}
-
-config {
-  call_module_type = "all"
-}
-
diff --git a/account-setup/aws/main.tf b/account-setup/aws/main.tf
index 3390329b..25ef766b 100644
--- a/account-setup/aws/main.tf
+++ b/account-setup/aws/main.tf
@@ -4,31 +4,31 @@ locals {
       for subnet in var.subnets[vpc_name].public_subnets_cidr : "${vpc_name}-${subnet}" => {
         vpc_id = aws_vpc.vpc[vpc_name].id
         subnet = subnet
-        az = var.subnets[vpc_name].availability_zones[index(var.subnets[vpc_name].public_subnets_cidr, subnet)]
+        az = var.subnets[vpc_name].availability_zones[index(var.subnets[vpc_name].public_subnets_cidr,subnet)]
       }
     })
-    ]
-    ...)
+  ]
+  ...)

   private_subnet_map = merge([
     for vpc_name in keys(var.subnets) : tomap({
       for subnet in var.subnets[vpc_name].private_subnets_cidr : "${vpc_name}-${subnet}" => {
         vpc_id = aws_vpc.vpc[vpc_name].id
         subnet = subnet
-        az = var.subnets[vpc_name].availability_zones[index(var.subnets[vpc_name].private_subnets_cidr, subnet)]
+        az = var.subnets[vpc_name].availability_zones[index(var.subnets[vpc_name].private_subnets_cidr,subnet)]
       }
     })
-    ]
-    ...)
+  ]
+  ...)

   db_subnet_map = merge([
     for vpc_name in keys(var.subnets) : tomap({
       for subnet in var.subnets[vpc_name].db_subnets_cidr : "${vpc_name}-${subnet}" => {
         vpc_id = aws_vpc.vpc[vpc_name].id
         subnet = subnet
-        az = var.subnets[vpc_name].availability_zones[index(var.subnets[vpc_name].db_subnets_cidr, subnet)]
+        az = var.subnets[vpc_name].availability_zones[index(var.subnets[vpc_name].db_subnets_cidr,subnet)]
       }
     })
-    ]
-    ...)
+  ]
+  ...)
} @@ -48,12 +48,12 @@ resource "aws_subnet" "public_subnets" { for_each = local.public_subnet_map vpc_id = each.value["vpc_id"] cidr_block = each.value["subnet"] - availability_zone = each.value["az"] + availability_zone = each.value["az"] map_public_ip_on_launch = true tags = { Name = "${each.key}-public-subnet" - Environment = each.key + Environment = "${each.key}" } depends_on = [ aws_vpc.vpc @@ -64,12 +64,12 @@ resource "aws_subnet" "private_subnets" { for_each = local.private_subnet_map vpc_id = each.value["vpc_id"] cidr_block = each.value["subnet"] - availability_zone = each.value["az"] + availability_zone = each.value["az"] map_public_ip_on_launch = false tags = { Name = "${each.key}-private-subnet" - Environment = each.key + Environment = "${each.key}" } depends_on = [ aws_vpc.vpc @@ -80,12 +80,12 @@ resource "aws_subnet" "db_subnets" { for_each = local.db_subnet_map vpc_id = each.value["vpc_id"] cidr_block = each.value["subnet"] - availability_zone = each.value["az"] + availability_zone = each.value["az"] map_public_ip_on_launch = false tags = { Name = "${each.key}-db-subnet" - Environment = each.key + Environment = "${each.key}" } depends_on = [ aws_vpc.vpc @@ -94,7 +94,7 @@ resource "aws_subnet" "db_subnets" { resource "aws_internet_gateway" "internet_gw" { for_each = var.subnets - vpc_id = aws_vpc.vpc[each.key].id + vpc_id = aws_vpc.vpc[each.key].id tags = { Name = "${each.key}-internet-gw" @@ -104,7 +104,7 @@ resource "aws_internet_gateway" "internet_gw" { ## Route table resource "aws_route_table" "public_route_table" { for_each = var.subnets - vpc_id = aws_vpc.vpc[each.key].id + vpc_id = aws_vpc.vpc[each.key].id tags = { Name = "${each.key}-public_route_table" @@ -112,8 +112,8 @@ resource "aws_route_table" "public_route_table" { } resource "aws_route_table" "private_route_table" { - for_each = local.private_subnet_map - vpc_id = aws_vpc.vpc[split("-", each.key)[0]].id + for_each = local.private_subnet_map + vpc_id = aws_vpc.vpc[split("-",each.key)[0]].id tags = { Name = "${each.key}-private_route_table" @@ -121,8 +121,8 @@ resource "aws_route_table" "private_route_table" { } resource "aws_route_table" "db_route_table" { - for_each = local.db_subnet_map - vpc_id = aws_vpc.vpc[split("-", each.key)[0]].id + for_each = local.db_subnet_map + vpc_id = aws_vpc.vpc[split("-",each.key)[0]].id tags = { Name = "${each.key}-db_route_table" @@ -130,8 +130,8 @@ resource "aws_route_table" "db_route_table" { } resource "aws_eip" "eip" { - for_each = local.public_subnet_map - vpc = true + for_each = local.public_subnet_map + vpc = true depends_on = [aws_internet_gateway.internet_gw] tags = { Name = "${each.key}-nat-gateway-eip" @@ -139,62 +139,62 @@ resource "aws_eip" "eip" { } resource "aws_nat_gateway" "nat-gateway" { - for_each = local.public_subnet_map + for_each = local.public_subnet_map allocation_id = aws_eip.eip[each.key].id subnet_id = aws_subnet.public_subnets[each.key].id tags = { Name = "${each.key}-nat-gateway-public" } - depends_on = [aws_eip.eip, aws_subnet.public_subnets, aws_internet_gateway.internet_gw] + depends_on = [aws_eip.eip,aws_subnet.public_subnets,aws_internet_gateway.internet_gw] } resource "aws_route" "public_route" { - for_each = var.subnets - route_table_id = aws_route_table.public_route_table[each.key].id - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.internet_gw[each.key].id - depends_on = [aws_internet_gateway.internet_gw] + for_each = var.subnets + route_table_id = aws_route_table.public_route_table[each.key].id + 
destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.internet_gw[each.key].id + depends_on = [aws_internet_gateway.internet_gw] } resource "aws_route" "private_route" { - count = length(keys(local.private_subnet_map)) - route_table_id = aws_route_table.private_route_table[element(keys(local.private_subnet_map), count.index)].id - destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = aws_nat_gateway.nat-gateway[element(keys(local.public_subnet_map), count.index)].id - depends_on = [aws_nat_gateway.nat-gateway, aws_internet_gateway.internet_gw] + count = length(keys(local.private_subnet_map)) + route_table_id = aws_route_table.private_route_table[element(keys(local.private_subnet_map),count.index)].id + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.nat-gateway[element(keys(local.public_subnet_map),count.index)].id + depends_on = [aws_nat_gateway.nat-gateway,aws_internet_gateway.internet_gw] } resource "aws_route" "db_route" { - count = length(keys(local.db_subnet_map)) - route_table_id = aws_route_table.db_route_table[element(keys(local.db_subnet_map), count.index)].id - destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = aws_nat_gateway.nat-gateway[element(keys(local.public_subnet_map), count.index)].id - depends_on = [aws_nat_gateway.nat-gateway, aws_internet_gateway.internet_gw] + count = length(keys(local.db_subnet_map)) + route_table_id = aws_route_table.db_route_table[element(keys(local.db_subnet_map),count.index)].id + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.nat-gateway[element(keys(local.public_subnet_map),count.index)].id + depends_on = [aws_nat_gateway.nat-gateway,aws_internet_gateway.internet_gw] } resource "aws_route_table_association" "public_route_table_association" { - for_each = local.public_subnet_map + for_each = local.public_subnet_map subnet_id = aws_subnet.public_subnets[each.key].id - route_table_id = aws_route_table.public_route_table[split("-", each.key)[0]].id + route_table_id = aws_route_table.public_route_table[split("-",each.key)[0]].id } resource "aws_route_table_association" "private_route_table_association" { - for_each = local.private_subnet_map + for_each = local.private_subnet_map - subnet_id = aws_subnet.private_subnets[each.key].id + subnet_id = aws_subnet.private_subnets[each.key].id route_table_id = aws_route_table.private_route_table[each.key].id } resource "aws_route_table_association" "db_route_table_association" { - for_each = local.db_subnet_map + for_each = local.db_subnet_map subnet_id = aws_subnet.db_subnets[each.key].id route_table_id = aws_route_table.db_route_table[each.key].id } resource "aws_security_group" "allow_tls" { - for_each = var.subnets + for_each = var.subnets description = "Cluster communication with worker nodes" vpc_id = aws_vpc.vpc[each.key].id } diff --git a/account-setup/aws/outputs.tf b/account-setup/aws/outputs.tf index 9b9b5e69..26857101 100644 --- a/account-setup/aws/outputs.tf +++ b/account-setup/aws/outputs.tf @@ -1,5 +1,5 @@ output "vpc_id" { - value = [aws_vpc.vpc] + value = ["${aws_vpc.vpc}"] } output "public_subnets" { diff --git a/github/aws/teams.tf b/github/aws/teams.tf index 72f901ac..ffe92300 100644 --- a/github/aws/teams.tf +++ b/github/aws/teams.tf @@ -1,15 +1,16 @@ data "github_organization" "organization" { - name = var.owner + name = var.owner } locals { + org_members = data.github_organization.organization.members github_repo_admin_access = merge([ for team in keys(var.github_teams) : tomap({ for user in 
var.github_teams[team].admins : "${team}-${user}" => { - user = user - team = team - } - }) + user = user + team = team + } + }) ]...) github_repo_editor_access = merge([ @@ -32,58 +33,58 @@ locals { } resource "github_team" "admin_team" { - for_each = var.github_teams - name = "${each.key}_admin" + for_each = var.github_teams + name = "${each.key}_admin" } resource "github_team" "editor_team" { - for_each = var.github_teams - name = "${each.key}_editor" + for_each = var.github_teams + name = "${each.key}_editor" } resource "github_team" "viewer_team" { - for_each = var.github_teams - name = "${each.key}_viewer" + for_each = var.github_teams + name = "${each.key}_viewer" } resource "github_team_membership" "admin_team" { - for_each = local.github_repo_admin_access - team_id = github_team.admin_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_admin_access + team_id = github_team.admin_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_membership" "editor_team" { - for_each = local.github_repo_editor_access - team_id = github_team.editor_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_editor_access + team_id = github_team.editor_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_membership" "viewer_team" { - for_each = local.github_repo_viewer_access - team_id = github_team.viewer_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_viewer_access + team_id = github_team.viewer_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_repository" "admin_team" { - for_each = var.github_repos - team_id = github_team.admin_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "admin" + for_each = var.github_repos + team_id = github_team.admin_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "admin" } resource "github_team_repository" "editor_team" { - for_each = var.github_repos - team_id = github_team.editor_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "push" + for_each = var.github_repos + team_id = github_team.editor_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "push" } resource "github_team_repository" "viewer_team" { - for_each = var.github_repos - team_id = github_team.viewer_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "pull" + for_each = var.github_repos + team_id = github_team.viewer_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "pull" } \ No newline at end of file diff --git a/github/aws/vars.tf b/github/aws/vars.tf index a1545cfb..4ac4b4c2 100644 --- a/github/aws/vars.tf +++ b/github/aws/vars.tf @@ -4,9 +4,15 @@ variable "owner" { default = "" } +variable "project_name" { + description = "Name of the GCP project where GIT PAT is stored as secret" + type = string + default = "" +} + variable "github_repos" { description = "Map of repositories with their respective properties" - type = map(object({ + type = map(object({ name = string team_name = string visibility = optional(string) @@ -24,14 +30,20 @@ variable "github_repos" { variable "github_teams" { description = "Map of teams 
with their respective users who can have required access on particular repo" - type = map(object({ - admins = list(string) - editors = list(string) - viewers = list(string) + type = map(object({ + admins = list(string) + editors = list(string) + viewers = list(string) })) default = {} } +variable "github_base_url" { + description = "The base URL of the GitHub API" + type = string + default = "https://github.com" +} + variable "is_enterprise" { description = "Flag to indicate whether the GitHub organization is enterprise or free" type = bool diff --git a/github/azure/teams.tf b/github/azure/teams.tf index 72f901ac..ffe92300 100644 --- a/github/azure/teams.tf +++ b/github/azure/teams.tf @@ -1,15 +1,16 @@ data "github_organization" "organization" { - name = var.owner + name = var.owner } locals { + org_members = data.github_organization.organization.members github_repo_admin_access = merge([ for team in keys(var.github_teams) : tomap({ for user in var.github_teams[team].admins : "${team}-${user}" => { - user = user - team = team - } - }) + user = user + team = team + } + }) ]...) github_repo_editor_access = merge([ @@ -32,58 +33,58 @@ locals { } resource "github_team" "admin_team" { - for_each = var.github_teams - name = "${each.key}_admin" + for_each = var.github_teams + name = "${each.key}_admin" } resource "github_team" "editor_team" { - for_each = var.github_teams - name = "${each.key}_editor" + for_each = var.github_teams + name = "${each.key}_editor" } resource "github_team" "viewer_team" { - for_each = var.github_teams - name = "${each.key}_viewer" + for_each = var.github_teams + name = "${each.key}_viewer" } resource "github_team_membership" "admin_team" { - for_each = local.github_repo_admin_access - team_id = github_team.admin_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_admin_access + team_id = github_team.admin_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_membership" "editor_team" { - for_each = local.github_repo_editor_access - team_id = github_team.editor_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_editor_access + team_id = github_team.editor_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_membership" "viewer_team" { - for_each = local.github_repo_viewer_access - team_id = github_team.viewer_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_viewer_access + team_id = github_team.viewer_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_repository" "admin_team" { - for_each = var.github_repos - team_id = github_team.admin_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "admin" + for_each = var.github_repos + team_id = github_team.admin_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "admin" } resource "github_team_repository" "editor_team" { - for_each = var.github_repos - team_id = github_team.editor_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "push" + for_each = var.github_repos + team_id = github_team.editor_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "push" } resource "github_team_repository" "viewer_team" { - for_each = var.github_repos - team_id = 
github_team.viewer_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "pull" + for_each = var.github_repos + team_id = github_team.viewer_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "pull" } \ No newline at end of file diff --git a/github/azure/vars.tf b/github/azure/vars.tf index cb6ea6a7..bb9b5578 100644 --- a/github/azure/vars.tf +++ b/github/azure/vars.tf @@ -6,7 +6,7 @@ variable "owner" { variable "github_repos" { description = "Map of repositories with their respective properties" - type = map(object({ + type = map(object({ name = string team_name = string visibility = optional(string) @@ -24,14 +24,20 @@ variable "github_repos" { variable "github_teams" { description = "Map of teams with their respective users who can have required access on particular repo" - type = map(object({ - admins = list(string) - editors = list(string) - viewers = list(string) + type = map(object({ + admins = list(string) + editors = list(string) + viewers = list(string) })) default = {} } +variable "github_base_url" { + description = "The base URL of the GitHub API" + type = string + default = "https://github.com" +} + variable "is_enterprise" { description = "Flag to indicate whether the GitHub organization is enterprise or free" type = bool diff --git a/github/gcp/teams.tf b/github/gcp/teams.tf index 72f901ac..ffe92300 100644 --- a/github/gcp/teams.tf +++ b/github/gcp/teams.tf @@ -1,15 +1,16 @@ data "github_organization" "organization" { - name = var.owner + name = var.owner } locals { + org_members = data.github_organization.organization.members github_repo_admin_access = merge([ for team in keys(var.github_teams) : tomap({ for user in var.github_teams[team].admins : "${team}-${user}" => { - user = user - team = team - } - }) + user = user + team = team + } + }) ]...) 
github_repo_editor_access = merge([ @@ -32,58 +33,58 @@ locals { } resource "github_team" "admin_team" { - for_each = var.github_teams - name = "${each.key}_admin" + for_each = var.github_teams + name = "${each.key}_admin" } resource "github_team" "editor_team" { - for_each = var.github_teams - name = "${each.key}_editor" + for_each = var.github_teams + name = "${each.key}_editor" } resource "github_team" "viewer_team" { - for_each = var.github_teams - name = "${each.key}_viewer" + for_each = var.github_teams + name = "${each.key}_viewer" } resource "github_team_membership" "admin_team" { - for_each = local.github_repo_admin_access - team_id = github_team.admin_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_admin_access + team_id = github_team.admin_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_membership" "editor_team" { - for_each = local.github_repo_editor_access - team_id = github_team.editor_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_editor_access + team_id = github_team.editor_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_membership" "viewer_team" { - for_each = local.github_repo_viewer_access - team_id = github_team.viewer_team[each.value.team].id - username = each.value.user - role = "member" + for_each = local.github_repo_viewer_access + team_id = github_team.viewer_team[each.value.team].id + username = each.value.user + role = "member" } resource "github_team_repository" "admin_team" { - for_each = var.github_repos - team_id = github_team.admin_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "admin" + for_each = var.github_repos + team_id = github_team.admin_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "admin" } resource "github_team_repository" "editor_team" { - for_each = var.github_repos - team_id = github_team.editor_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "push" + for_each = var.github_repos + team_id = github_team.editor_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "push" } resource "github_team_repository" "viewer_team" { - for_each = var.github_repos - team_id = github_team.viewer_team[each.value.team_name].id - repository = github_repository.app_repo[each.key].name - permission = "pull" + for_each = var.github_repos + team_id = github_team.viewer_team[each.value.team_name].id + repository = github_repository.app_repo[each.key].name + permission = "pull" } \ No newline at end of file diff --git a/github/gcp/vars.tf b/github/gcp/vars.tf index a1545cfb..4ac4b4c2 100644 --- a/github/gcp/vars.tf +++ b/github/gcp/vars.tf @@ -4,9 +4,15 @@ variable "owner" { default = "" } +variable "project_name" { + description = "Name of the GCP project where GIT PAT is stored as secret" + type = string + default = "" +} + variable "github_repos" { description = "Map of repositories with their respective properties" - type = map(object({ + type = map(object({ name = string team_name = string visibility = optional(string) @@ -24,14 +30,20 @@ variable "github_repos" { variable "github_teams" { description = "Map of teams with their respective users who can have required access on particular repo" - type = map(object({ - admins = list(string) - editors = 
list(string) - viewers = list(string) + type = map(object({ + admins = list(string) + editors = list(string) + viewers = list(string) })) default = {} } +variable "github_base_url" { + description = "The base URL of the GitHub API" + type = string + default = "https://github.com" +} + variable "is_enterprise" { description = "Flag to indicate whether the GitHub organization is enterprise or free" type = bool diff --git a/gke/cluster.tf b/gke/cluster.tf index 5342abb9..2924964f 100644 --- a/gke/cluster.tf +++ b/gke/cluster.tf @@ -93,9 +93,9 @@ resource "google_container_cluster" "primary" { dynamic "resource_limits" { for_each = local.autoscaling_resource_limits content { - resource_type = resource_limits.value["resource_type"] - minimum = resource_limits.value["minimum"] - maximum = resource_limits.value["maximum"] + resource_type = lookup(resource_limits.value, "resource_type") + minimum = lookup(resource_limits.value, "minimum") + maximum = lookup(resource_limits.value, "maximum") } } } @@ -244,7 +244,7 @@ resource "google_container_cluster" "primary" { } lifecycle { - ignore_changes = [node_pool, initial_node_count, resource_labels["asmv"], resource_labels["mesh_id"], user_managed_keys_config] + ignore_changes = [node_pool, initial_node_count, resource_labels["asmv"], resource_labels["mesh_id"],user_managed_keys_config] } dynamic "dns_config" { @@ -286,8 +286,8 @@ resource "google_container_cluster" "primary" { service_account = lookup(var.node_pools[0], "service_account", local.service_account) tags = concat( - lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], - lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-default-pool"] : [], lookup(local.node_pools_tags, "all", []), lookup(local.node_pools_tags, var.node_pools[0].name, []), ) @@ -436,8 +436,8 @@ resource "google_container_node_pool" "pools" { } } - network_config { - enable_private_nodes = true + network_config { + enable_private_nodes = true } node_config { @@ -457,8 +457,8 @@ resource "google_container_node_pool" "pools" { } } labels = merge( - lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, local.node_pools_labels["all"], local.node_pools_labels[each.value["name"]], ) @@ -467,8 +467,8 @@ resource "google_container_node_pool" "pools" { local.node_pools_resource_labels[each.value["name"]], ) metadata = merge( - lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? 
{ "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, local.node_pools_metadata["all"], local.node_pools_metadata[each.value["name"]], { @@ -487,8 +487,8 @@ resource "google_container_node_pool" "pools" { } } tags = concat( - lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], - lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], local.node_pools_tags["all"], local.node_pools_tags[each.value["name"]], ) @@ -570,7 +570,7 @@ resource "google_container_node_pool" "pools" { update = lookup(var.timeouts, "update", "45m") delete = lookup(var.timeouts, "delete", "45m") } - + } resource "google_container_node_pool" "windows_pools" { provider = google @@ -652,8 +652,8 @@ resource "google_container_node_pool" "windows_pools" { } } labels = merge( - lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_labels, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, local.node_pools_labels["all"], local.node_pools_labels[each.value["name"]], ) @@ -662,8 +662,8 @@ resource "google_container_node_pool" "windows_pools" { local.node_pools_resource_labels[each.value["name"]], ) metadata = merge( - lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, - lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "cluster_name", true) ? { "cluster_name" = var.name } : {}, + lookup(lookup(local.node_pools_metadata, "default_values", {}), "node_pool", true) ? { "node_pool" = each.value["name"] } : {}, local.node_pools_metadata["all"], local.node_pools_metadata[each.value["name"]], { @@ -682,8 +682,8 @@ resource "google_container_node_pool" "windows_pools" { } } tags = concat( - lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], - lookup(local.node_pools_tags, "default_values", [true, true])[1] ? ["${local.cluster_network_tag}-${each.value["name"]}"] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[0] ? [local.cluster_network_tag] : [], + lookup(local.node_pools_tags, "default_values", [true, true])[1] ? 
["${local.cluster_network_tag}-${each.value["name"]}"] : [], local.node_pools_tags["all"], local.node_pools_tags[each.value["name"]], ) diff --git a/hosted-zones/aws/vars.tf b/hosted-zones/aws/vars.tf index b2554bd1..4b922e4b 100644 --- a/hosted-zones/aws/vars.tf +++ b/hosted-zones/aws/vars.tf @@ -1,15 +1,30 @@ variable "zones" { description = "The list of user access for the account setup" type = map(object({ - domain = string + domain = string add_ns_records = bool })) } +variable "user_access" { + description = "map of roles for domain" + type = object({ + editors = optional(list(string)) + viewers = optional(list(string)) + }) + default = {} +} + +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" +} + variable "master_zone" { description = "master zone for ns record to be added" - type = string - default = "" + type = string + default = "" } variable "caa_certs" { diff --git a/hosted-zones/azure/vars.tf b/hosted-zones/azure/vars.tf index f2479ec0..de33dbe5 100644 --- a/hosted-zones/azure/vars.tf +++ b/hosted-zones/azure/vars.tf @@ -1,20 +1,35 @@ variable "zones" { description = "The list of user access for the account setup" type = map(object({ - domain = string + domain = string add_ns_records = bool })) } +variable "user_access" { + description = "map of roles for domain" + type = object({ + editors = optional(list(string)) + viewers = optional(list(string)) + }) + default = {} +} + +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" +} + variable "master_zone" { description = "master zone for ns record to be added" - type = string - default = "" + type = string + default = "" } variable "resource_group_name" { description = "azure resource group name" - type = string + type = string } variable "caa_certs" { diff --git a/hosted-zones/oci/vars.tf b/hosted-zones/oci/vars.tf index 1c47feab..742a3d9c 100644 --- a/hosted-zones/oci/vars.tf +++ b/hosted-zones/oci/vars.tf @@ -1,11 +1,26 @@ variable "zones" { description = "The list of user access for the account setup" type = map(object({ - domain = string + domain = string add_ns_records = bool })) } +variable "user_access" { + description = "map of roles for domain" + type = object({ + editors = optional(list(string)) + viewers = optional(list(string)) + }) + default = {} +} + +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" +} + variable "master_zone" { description = "Master zone for NS record to be added" type = string @@ -14,5 +29,5 @@ variable "master_zone" { variable "provider_id" { description = "OCI compartment ID" - type = string + type = string } \ No newline at end of file diff --git a/k8s/aws/auth/kubernetes.tf b/k8s/aws/auth/kubernetes.tf index 6a716ae3..925ffcac 100644 --- a/k8s/aws/auth/kubernetes.tf +++ b/k8s/aws/auth/kubernetes.tf @@ -3,19 +3,19 @@ locals { } module "remote_state_gcp_cluster" { - source = "../../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = local.cluster_prefix + source = "../../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = local.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../../remote-state/aws" - count = var.shared_services.type == "aws" ? 
1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = local.cluster_prefix - location = var.shared_services.location + source = "../../../remote-state/aws" + count = var.shared_services.type == "aws" ? 1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = local.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { @@ -37,7 +37,7 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -45,6 +45,6 @@ provider "kubernetes" { provider "kubectl" { load_config_file = false host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } \ No newline at end of file diff --git a/k8s/aws/eks/appdynamics.tf b/k8s/aws/eks/appdynamics.tf index f3c10b5a..364e6ad6 100644 --- a/k8s/aws/eks/appdynamics.tf +++ b/k8s/aws/eks/appdynamics.tf @@ -3,7 +3,7 @@ resource "helm_release" "app_dynamics" { count = var.appd_controller_url == "" || var.appd_controller_url == "" || var.appd_account == "" || var.appd_user == "" || var.appd_password == "" || var.appd_accesskey == "" ? 0 : 1 chart = "cluster-agent" name = "cluster-agent" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name version = "0.1.18" repository = "https://ciscodevnet.github.io/appdynamics-charts" diff --git a/k8s/aws/eks/cert-manager.tf b/k8s/aws/eks/cert-manager.tf index 7fce2f72..b275f308 100644 --- a/k8s/aws/eks/cert-manager.tf +++ b/k8s/aws/eks/cert-manager.tf @@ -1,6 +1,6 @@ resource "null_resource" "wait_for_cluster" { provisioner "local-exec" { - command = "sleep 60" # Adjust the duration as needed + command = "sleep 60" # Adjust the duration as needed } depends_on = [module.eks] @@ -8,9 +8,9 @@ resource "null_resource" "wait_for_cluster" { data "template_file" "cert_manager_template" { template = file("./templates/cert-manager-values.yaml") - vars = { - CLUSTER_NAME = local.cluster_name - role_arn = aws_iam_role.cluster_issuer_role.arn + vars = { + CLUSTER_NAME = local.cluster_name + role_arn = aws_iam_role.cluster_issuer_role.arn } } @@ -33,9 +33,9 @@ resource "helm_release" "cert-manager" { } resource "aws_iam_policy" "cluster_issuer" { - name = "${local.cluster_name}-cluster-issuer-policy" - policy = data.aws_iam_policy_document.cluster_issuer_policy_document.json - tags = local.common_tags + name = "${local.cluster_name}-cluster-issuer-policy" + policy = data.aws_iam_policy_document.cluster_issuer_policy_document.json + tags = local.common_tags } data "aws_iam_policy_document" "cluster_issuer_policy_document" { @@ -53,7 +53,7 @@ data "aws_iam_policy_document" "cluster_issuer_policy_document" { "route53:ChangeResourceRecordSets", "route53:ListResourceRecordSets" ] - resources = ["arn:aws:route53:::hostedzone/${data.aws_route53_zone.zone[0].zone_id}"] + resources = ["arn:aws:route53:::hostedzone/${data.aws_route53_zone.zone.0.zone_id}"] } statement { @@ -66,26 +66,26 @@ data 
"aws_iam_policy_document" "cluster_issuer_policy_document" { } resource "aws_iam_role" "cluster_issuer_role" { - name = "${local.cluster_name}-cluster-issuer-role" + name = "${local.cluster_name}-cluster-issuer-role" assume_role_policy = jsonencode({ - Version : "2012-10-17" - Statement : [ - { - "Sid" : "", - "Action" : "sts:AssumeRole" - "Effect" : "Allow" - "Principal" : { - "Service" : "ec2.amazonaws.com" + Version: "2012-10-17" + Statement: [ + { + "Sid": "", + "Action": "sts:AssumeRole" + "Effect": "Allow" + "Principal": { + "Service": "ec2.amazonaws.com" + } } - } - ] - }) + ] + }) } resource "aws_iam_user" "cluster_issuer" { - name = "${local.cluster_name}-issuer-user" - tags = local.common_tags + name = "${local.cluster_name}-issuer-user" + tags = local.common_tags } resource "aws_iam_user_policy_attachment" "cluster_issuer_attach" { @@ -115,15 +115,15 @@ resource "kubernetes_secret" "cluster_issuer_credentials" { data "template_file" "cluster_wildcard_issuer" { template = file("./templates/cluster-issuer.yaml") - vars = { + vars = { dns = local.domain_name - cert_issuer_url = try(var.cert_issuer_config.env == "stage" ? "https://acme-staging-v02.api.letsencrypt.org/directory" : "https://acme-v02.api.letsencrypt.org/directory", "https://acme-staging-v02.api.letsencrypt.org/directory") + cert_issuer_url = try(var.cert_issuer_config.env == "stage" ? "https://acme-staging-v02.api.letsencrypt.org/directory" : "https://acme-v02.api.letsencrypt.org/directory","https://acme-staging-v02.api.letsencrypt.org/directory") location = var.app_region - zone_id = data.aws_route53_zone.zone[0].zone_id + zone_id = data.aws_route53_zone.zone.0.zone_id secret_name = "${local.cluster_name}-cluster-issuer-creds" email = var.cert_issuer_config.email } - depends_on = [helm_release.cert-manager, kubernetes_namespace.monitoring] + depends_on = [helm_release.cert-manager,kubernetes_namespace.monitoring] } resource "kubectl_manifest" "cluster_wildcard_issuer" { @@ -132,8 +132,8 @@ resource "kubectl_manifest" "cluster_wildcard_issuer" { data "template_file" "cluster_wildcard_certificate" { template = file("./templates/cluster-certificate.yaml") - vars = { - dns = local.domain_name + vars = { + dns = local.domain_name } depends_on = [kubectl_manifest.cluster_wildcard_issuer] } @@ -144,7 +144,7 @@ resource "kubectl_manifest" "cluster_wildcard_certificate" { resource "kubernetes_secret_v1" "certificate_replicator" { metadata { - name = "tls-secret-replica" + name = "tls-secret-replica" namespace = "monitoring" annotations = { "replicator.v1.mittwald.de/replicate-from" = "cert-manager/wildcard-dns" diff --git a/k8s/aws/eks/domain.tf b/k8s/aws/eks/domain.tf index 62276548..fe709d41 100644 --- a/k8s/aws/eks/domain.tf +++ b/k8s/aws/eks/domain.tf @@ -1,5 +1,5 @@ locals { - domain_name = try(var.accessibility.domain_name != null ? var.accessibility.domain_name : "", "") + domain_name = try(var.accessibility.domain_name != null ? 
var.accessibility.domain_name : "", "") } data "aws_route53_zone" "zone" { @@ -19,10 +19,10 @@ data "kubernetes_service" "ingress-controller" { resource "aws_route53_record" "c_name_record" { provider = aws.shared-services - zone_id = data.aws_route53_zone.zone[0].zone_id + zone_id = data.aws_route53_zone.zone.0.zone_id name = "*.${local.domain_name}" type = "CNAME" - records = [data.kubernetes_service.ingress-controller.status[0].load_balancer[0].ingress[0].hostname] + records = [data.kubernetes_service.ingress-controller.status.0.load_balancer.0.ingress.0.hostname] ttl = 300 } diff --git a/k8s/aws/eks/fluentbit.tf b/k8s/aws/eks/fluentbit.tf index 89d988f6..de10217e 100644 --- a/k8s/aws/eks/fluentbit.tf +++ b/k8s/aws/eks/fluentbit.tf @@ -1,32 +1,32 @@ locals { - fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? var.fluent_bit.enable : false) : false - fluent_bit_cloud_watch_enable = var.fluent_bit != null ? (var.fluent_bit.cloud_watch_enable != null ? var.fluent_bit.cloud_watch_enable : false) : false - fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? var.fluent_bit.loki : []) : [] - fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] - fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []) : [] - fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []) : [] - fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []) : [] - fluent_bit_slack = local.fluent_bit_enable ? (var.fluent_bit.slack != null ? var.fluent_bit.slack : []) : [] + fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? var.fluent_bit.enable : false): false + fluent_bit_cloud_watch_enable = var.fluent_bit != null ? (var.fluent_bit.cloud_watch_enable != null ? var.fluent_bit.cloud_watch_enable : false): false + fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? var.fluent_bit.loki : []) : [] + fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] + fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []): [] + fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []): [] + fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []): [] + fluent_bit_slack = local.fluent_bit_enable ? (var.fluent_bit.slack != null ? var.fluent_bit.slack : []): [] fluent_bit_loki_outputs = concat([ - for k, v in local.fluent_bit_loki : { + for k,v in local.fluent_bit_loki : { host = v.host tenant_id = v.tenant_id != null ? v.tenant_id : "" labels = v.labels port = v.port != null ? v.port : 3100 - tls = v.tls != null ? v.tls : "On" + tls = v.tls != null ? v.tls : "On" } if length(local.fluent_bit_loki) > 0 - ], local.enable_loki ? [{ - host = "loki-distributor.loki" - tenant_id = random_uuid.grafana_standard_datasource_header_value.result - labels = "namespace=$(kubernetes['namespace_name']),pod=$(kubernetes['pod_name']),service=$(kubernetes['container_name']),cluster=${local.cluster_name}" - port = 3100 - tls = "Off" - }] : []) + ], local.enable_loki ? 
[{ + host = "loki-distributor.loki" + tenant_id = random_uuid.grafana_standard_datasource_header_value.result + labels = "namespace=$(kubernetes['namespace_name']),pod=$(kubernetes['pod_name']),service=$(kubernetes['container_name']),cluster=${local.cluster_name}" + port = 3100 + tls = "Off" + }]: []) fluent_bit_http_outputs = [ - for k, v in local.fluent_bit_http : { + for k,v in local.fluent_bit_http : { host = v.host port = v.port != null ? v.port : 80 uri = v.uri != null ? v.uri : "/" @@ -37,7 +37,7 @@ locals { ] fluent_bit_splunk_outputs = [ - for k, v in local.fluent_bit_splunk : { + for k,v in local.fluent_bit_splunk : { host = v.host token = v.token port = v.port != null ? v.port : 8088 @@ -47,25 +47,25 @@ locals { ] fluent_bit_datadog_outputs = [ - for k, v in local.fluent_bit_datadog : { - host = v.host - api_key = v.api_key - tls = v.tls != null ? v.tls : "On" - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_datadog : { + host = v.host + api_key = v.api_key + tls = v.tls != null ? v.tls : "On" + compress = v.compress != null ? v.compress : "gzip" } if length(local.fluent_bit_datadog) > 0 ] fluent_bit_newrelic_outputs = [ - for k, v in local.fluent_bit_newrelic : { - host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" - api_key = v.api_key - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_newrelic : { + host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" + api_key = v.api_key + compress = v.compress != null ? v.compress : "gzip" } if length(local.fluent_bit_newrelic) > 0 ] fluent_bit_slack_outputs = [ - for k, v in local.fluent_bit_slack : { - webhook = v.webhook + for k,v in local.fluent_bit_slack : { + webhook = v.webhook } if length(local.fluent_bit_slack) > 0 ] @@ -114,13 +114,13 @@ data "aws_iam_policy_document" "fluent_bit_policy" { } } -data template_file "fluent-bit" { +data template_file "fluent-bit"{ count = local.fluent_bit_enable ? 1 : 0 template = file("./templates/fluent-bit-values.yaml") - vars = { - "CLUSTER_NAME" = local.cluster_name - "AWS_REGION" = var.app_region - "TAGS" = join(",", [for key, value in local.common_tags : "${key}=${value}"]) + vars = { + "CLUSTER_NAME" = local.cluster_name + "AWS_REGION" = var.app_region + "TAGS" = join(",", [for key, value in local.common_tags : "${key}=${value}"]) "HTTP_SERVER" = "On" "HTTP_PORT" = "2020" @@ -129,22 +129,22 @@ data template_file "fluent-bit" { "READ_FROM_TAIL" = "On" fluent_bit_cloud_watch_enable = local.fluent_bit_cloud_watch_enable - fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) - fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) - fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) - fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) - fluent_bit_newrelic_outputs = jsonencode(local.fluent_bit_newrelic_outputs) - fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) + fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) + fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) + fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) + fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) + fluent_bit_newrelic_outputs = jsonencode(local.fluent_bit_newrelic_outputs) + fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) } } resource "helm_release" "fluntbit-config" { - count = local.fluent_bit_enable ? 
1 : 0 + count = local.fluent_bit_enable ? 1 : 0 repository = "https://fluent.github.io/helm-charts" chart = "fluent-bit" name = "fluent-bit" version = "0.36.0" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name values = [ data.template_file.fluent-bit[0].rendered diff --git a/k8s/aws/eks/grafana.tf b/k8s/aws/eks/grafana.tf index db969de0..67042a42 100644 --- a/k8s/aws/eks/grafana.tf +++ b/k8s/aws/eks/grafana.tf @@ -1,29 +1,29 @@ locals { ### This is list of grafana datasources - grafana_datasource_list = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.datasource_list != null ? var.observability_config.grafana.configs.datasource_list : {}) : {}, {}) + grafana_datasource_list = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.datasource_list != null ? var.observability_config.grafana.configs.datasource_list : {}) : {}, {}) grafana_db_deletion_protection = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.persistence.deletion_protection != null ? var.observability_config.grafana.persistence.deletion_protection : true) : true, true) - grafana_allowed_domains = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.domains != null ? join(",", var.observability_config.grafana.configs.domains) : "") : "", "") - prometheus_enable = try(var.observability_config.prometheus != null ? var.observability_config.prometheus.enable : true, true) - grafana_enable = try(var.observability_config.grafana != null ? var.observability_config.grafana.enable : false, false) - grafana_host = try(var.observability_config.grafana.url != null ? var.observability_config.grafana.url : (local.domain_name != "" && !var.public_ingress ? "grafana.${local.domain_name}" : ""), "") + grafana_allowed_domains = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.domains != null ? join(",", var.observability_config.grafana.configs.domains) : "") : "", "") + prometheus_enable = try(var.observability_config.prometheus != null ? var.observability_config.prometheus.enable : true, true) + grafana_enable = try(var.observability_config.grafana != null ? var.observability_config.grafana.enable : false, false) + grafana_host = try(var.observability_config.grafana.url != null ? var.observability_config.grafana.url : (local.domain_name != "" && !var.public_ingress ? "grafana.${local.domain_name}" : ""), "") } resource "random_password" "observability_admin" { - count = local.grafana_enable ? 1 : 0 - length = 16 - special = false + count = local.grafana_enable ? 1 : 0 + length = 16 + special = false } data "template_file" "grafana_template" { - count = local.grafana_enable ? 1 : 0 + count = local.grafana_enable ? 1 : 0 template = file("./templates/grafana-values.yaml") vars = { NAMESPACE = "monitoring" GRAFANA_HOST = local.grafana_host GRAFANA_ENABLED = local.grafana_enable GRAFANA_TLS_HOST = "*.${local.domain_name}" - GRAFANA_OBS_ADMIN_PASSWORD = try(local.grafana_enable ? try(random_password.observability_admin[0].result, "") : "", "") + GRAFANA_OBS_ADMIN_PASSWORD = try(local.grafana_enable ? try(random_password.observability_admin.0.result, "") : "", "") CLUSTER_NAME = var.app_name PERSISTENCE_TYPE_DB = try(var.observability_config.grafana.persistence.type == "db" ? 
true : false, false) PERSISTENCE_TYPE_PVC = try(var.observability_config.grafana.persistence.type == "pvc" ? true : false, false) @@ -36,9 +36,9 @@ data "template_file" "grafana_template" { GRAFANA_MIN_REPLICA = try(var.observability_config.grafana.min_replica != null ? var.observability_config.grafana.min_replica : 1, 1) GRAFANA_MAX_REPLICA = try(var.observability_config.grafana.max_replica != null ? var.observability_config.grafana.max_replica : 10, 10) GRAFANA_REQUEST_MEMORY = try(var.observability_config.grafana.request_memory != null ? var.observability_config.grafana.request_memory : "100Mi", "100Mi") - GRAFANA_REQUEST_CPU = try(var.observability_config.grafana.request_cpu != null ? var.observability_config.grafana.request_cpu : "100m", "100m") - GRAFANA_LIMIT_MEMORY = try(var.observability_config.grafana.limit_memory != null ? var.observability_config.grafana.limit_memory : "500Mi", "500Mi") - GRAFANA_LIMIT_CPU = try(var.observability_config.grafana.limit_cpu != null ? var.observability_config.grafana.limit_cpu : "500m", "500m") + GRAFANA_REQUEST_CPU = try( var.observability_config.grafana.request_cpu != null ? var.observability_config.grafana.request_cpu : "100m", "100m") + GRAFANA_LIMIT_MEMORY = try(var.observability_config.grafana.limit_memory != null ? var.observability_config.grafana.limit_memory: "500Mi", "500Mi") + GRAFANA_LIMIT_CPU = try( var.observability_config.grafana.limit_cpu != null ? var.observability_config.grafana.limit_cpu : "500m", "500m") GRAFANA_DASHBOARD_LIMIT_MEMORY = try(var.observability_config.grafana.dashboard.limit_memory != null ? var.observability_config.grafana.dashboard.limit_memory : "512Mi", "512Mi") GRAFANA_DASHBOARD_LIMIT_CPU = try(var.observability_config.grafana.dashboard.limit_cpu != null ? var.observability_config.grafana.dashboard.limit_cpu : "512m", "512m") GRAFANA_DASHBOARD_REQUEST_MEMORY = try(var.observability_config.grafana.dashboard.request_memory != null ? var.observability_config.grafana.dashboard.request_memory : "256Mi", "256Mi") @@ -47,7 +47,7 @@ data "template_file" "grafana_template" { GRAFANA_DATASOURCE_LIMIT_CPU = try(var.observability_config.grafana.datasource.limit_cpu != null ? var.observability_config.grafana.datasource.limit_cpu : "512m", "512m") GRAFANA_DATASOURCE_REQUEST_MEMORY = try(var.observability_config.grafana.datasource.request_memory != null ? var.observability_config.grafana.datasource.request_memory : "256Mi", "256Mi") GRAFANA_DATASOURCE_REQUEST_CPU = try(var.observability_config.grafana.datasource.request_cpu != null ? var.observability_config.grafana.datasource.request_cpu : "256m", "256m") - ENABLE_SSO = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? var.observability_config.grafana.configs.enable_sso : false) : false, false) + ENABLE_SSO = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? var.observability_config.grafana.configs.enable_sso : false) :false, false) ALLOWED_DOMAINS = local.grafana_enable ? local.grafana_allowed_domains : "" OAUTH_ID = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? data.aws_secretsmanager_secret_version.oauth_client_id[0].secret_string : null) : null, null) OAUTH_SECRET = try(var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 
data.aws_secretsmanager_secret_version.oauth_client_secret[0].secret_string : null) : null, null) @@ -55,12 +55,12 @@ data "template_file" "grafana_template" { } resource "helm_release" "grafana" { - count = local.grafana_enable ? 1 : 0 - chart = "grafana" - name = "grafana" - namespace = kubernetes_namespace.monitoring.metadata[0].name - version = try(var.observability_config.grafana.version != null ? var.observability_config.grafana.version : "8.3.0", "8.3.0") - timeout = 1200 + count = local.grafana_enable ? 1 : 0 + chart = "grafana" + name = "grafana" + namespace = kubernetes_namespace.monitoring.metadata.0.name + version = try(var.observability_config.grafana.version != null ? var.observability_config.grafana.version : "8.3.0", "8.3.0") + timeout = 1200 repository = "https://grafana.github.io/helm-charts" @@ -71,9 +71,9 @@ resource "helm_release" "grafana" { } resource "kubernetes_config_map" "grafana_custom_datasource" { - for_each = { for k, v in local.grafana_datasource_list : k => v } + for_each = {for k,v in local.grafana_datasource_list : k => v} metadata { - name = "grafana-${each.key}-datasource" + name = "grafana-${each.key}-datasource" namespace = helm_release.grafana[0].namespace labels = { grafana_datasource = "1" @@ -83,10 +83,10 @@ resource "kubernetes_config_map" "grafana_custom_datasource" { data = { "datasource.yaml" = templatefile("${path.module}/templates/grafana-custom-datasource.yaml", { - tempo_datasource = local.enable_tempo - loki_datasource = local.enable_loki - mimir_datasource = local.enable_mimir - datasource_name = each.key + tempo_datasource = local.enable_tempo + loki_datasource = local.enable_loki + mimir_datasource = local.enable_mimir + datasource_name = each.key datasource_header_value = each.value } ) @@ -101,7 +101,7 @@ resource "kubernetes_config_map" "grafana_standard_datasource" { metadata { name = "grafana-standard-datasource" namespace = helm_release.grafana[0].namespace - labels = { + labels = { grafana_datasource = "1" } } @@ -109,14 +109,14 @@ resource "kubernetes_config_map" "grafana_standard_datasource" { data = { "datasource.yaml" = templatefile("./templates/grafana-standard-datasource.yaml", { - datasource_name = local.cluster_name + datasource_name = local.cluster_name datasource_header_value = random_uuid.grafana_standard_datasource_header_value.result - mimir_create = local.enable_mimir - loki_create = local.enable_loki - tempo_create = local.enable_tempo - cortex_create = local.enable_cortex - prometheus_create = local.prometheus_enable - }) + mimir_create = local.enable_mimir + loki_create = local.enable_loki + tempo_create = local.enable_tempo + cortex_create = local.enable_cortex + prometheus_create = local.prometheus_enable + }) } } @@ -125,38 +125,38 @@ resource "kubernetes_config_map" "grafana_service_dashboard" { metadata { name = "grafana-service-dashboard" namespace = helm_release.grafana[0].namespace - labels = { + labels = { grafana_dashboard = "1" } } data = { - "kong.json" = file("./templates/kong-official.json") - "cronjob.json" = file("./templates/cronjob.json") - "partner-standard-api.json" = file("./templates/partner-standard-api.json") - "cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") - "prometheus-disk-utilization.json" = file("./templates/prometheus-disk-utilization.json") + "kong.json" = file("./templates/kong-official.json") + "cronjob.json" = file("./templates/cronjob.json") + "partner-standard-api.json" = file("./templates/partner-standard-api.json") + 
"cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") + "prometheus-disk-utilization.json" = file("./templates/prometheus-disk-utilization.json") } } resource "aws_secretsmanager_secret" "observability_admin" { - count = local.grafana_enable ? 1 : 0 + count = local.grafana_enable ? 1 : 0 - name = "${local.cluster_name}-grafana-admin-secret" - tags = local.common_tags + name = "${local.cluster_name}-grafana-admin-secret" + tags = local.common_tags } resource "aws_secretsmanager_secret_version" "observability_admin" { - count = local.grafana_enable ? 1 : 0 + count = local.grafana_enable ? 1 : 0 - secret_id = aws_secretsmanager_secret.observability_admin[0].id - secret_string = random_password.observability_admin[0].result + secret_id = aws_secretsmanager_secret.observability_admin.0.id + secret_string = random_password.observability_admin.0.result } data "aws_secretsmanager_secret" "oauth_client_id" { - count = local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 - name = "${local.cluster_name}-oauth-client-id" + count = local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + name = "${local.cluster_name}-oauth-client-id" } data "aws_secretsmanager_secret_version" "oauth_client_id" { @@ -165,19 +165,19 @@ data "aws_secretsmanager_secret_version" "oauth_client_id" { } data "aws_secretsmanager_secret" "oauth_client_secret" { - count = local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 - name = "${local.cluster_name}-oauth-client-secret" + count = local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + name = "${local.cluster_name}-oauth-client-secret" } data "aws_secretsmanager_secret_version" "oauth_client_secret" { - count = local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + count = local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 secret_id = data.aws_secretsmanager_secret.oauth_client_secret[0].id } module "rds" { - source = "../../../sql/aws-rds" + source = "../../../sql/aws-rds" - count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 1 : 0, 0) + count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 
1 : 0, 0) cluster_name = local.cluster_name namespace = "monitoring" @@ -186,8 +186,8 @@ module "rds" { vpc_id = local.vpc_id ext_rds_sg_cidr_block = local.ext_rds_sg_cidr_block rds_name = "${local.cluster_name}-monitoring-sql-db" - read_replica = false - admin_user = "postgresadmin" + read_replica = false + admin_user = "postgresadmin" databases = ["grafana"] rds_type = "postgresql" allocated_storage = 10 @@ -201,5 +201,5 @@ module "rds" { log_min_duration_statement = -1 postgresql_engine_version = "13.7" - tags = local.common_tags + tags = local.common_tags } \ No newline at end of file diff --git a/k8s/aws/eks/kafka.tf b/k8s/aws/eks/kafka.tf index 0abe85f2..89ef45e3 100644 --- a/k8s/aws/eks/kafka.tf +++ b/k8s/aws/eks/kafka.tf @@ -14,7 +14,7 @@ data "aws_secretsmanager_secret" "kafka_secert_msk" { data "aws_secretsmanager_secret_version" "kafka_secert_version" { count = length(data.aws_secretsmanager_secrets.kafka_secert.arns) == 0 ? 0 : 1 - secret_id = data.aws_secretsmanager_secret.kafka_secert_msk[0].id + secret_id = data.aws_secretsmanager_secret.kafka_secert_msk.0.id } resource "aws_secretsmanager_secret" "local_kafka" { @@ -25,6 +25,6 @@ resource "aws_secretsmanager_secret" "local_kafka" { resource "aws_secretsmanager_secret_version" "local_kafka" { count = length(data.aws_secretsmanager_secrets.kafka_secert.arns) == 0 ? 0 : 1 - secret_id = aws_secretsmanager_secret.local_kafka[0].id - secret_string = jsondecode(data.aws_secretsmanager_secret_version.kafka_secert_version[0].secret_string).password + secret_id = aws_secretsmanager_secret.local_kafka.0.id + secret_string = jsondecode(data.aws_secretsmanager_secret_version.kafka_secert_version.0.secret_string).password } \ No newline at end of file diff --git a/k8s/aws/eks/main.tf b/k8s/aws/eks/main.tf index 0e584e4f..c261458a 100644 --- a/k8s/aws/eks/main.tf +++ b/k8s/aws/eks/main.tf @@ -1,16 +1,26 @@ locals { - cluster_name = var.app_env == "" ? var.app_name : "${var.app_name}-${var.app_env}" - node_port = 32443 # Node port which will be used by LB for exposure - inbound_ip = concat(["10.0.0.0/8"], var.custom_inbound_ip_range) + cluster_name = var.app_env == "" ? var.app_name : "${var.app_name}-${var.app_env}" + node_port = 32443 # Node port which will be used by LB for exposure + inbound_ip = concat(["10.0.0.0/8"], var.custom_inbound_ip_range) cluster_name_parts = split("-", local.cluster_name) environment = var.app_env == "" ? element(local.cluster_name_parts, length(local.cluster_name_parts) - 1) : var.app_env namespaces = [for namespace in var.namespace_folder_list : split("/", namespace)[0]] - common_tags = merge(var.common_tags, + common_tags = merge(var.common_tags, tomap({ - project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name, local.cluster_name) + project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name ,local.cluster_name) provisioner = try(var.standard_tags.provisioner != null ? 
var.standard_tags.provisioner : "zop-dev", "zop-dev") - })) + })) + + namespace_users = flatten([ + for key, value in var.app_namespaces:[ + for user in concat(value.admins, value.editors, value.viewers) : + { + namespace = key + name = user + } + ] + ]) } @@ -25,7 +35,7 @@ resource "aws_kms_key" "eks" { } data "aws_ami" "eks_ami" { - owners = [var.worker_ami_config.owner_id] + owners = [var.worker_ami_config.owner_id] filter { name = "name" values = [var.worker_ami_config.name] @@ -33,26 +43,26 @@ data "aws_ami" "eks_ami" { } module "eks" { - source = "terraform-aws-modules/eks/aws" - version = "20.0.0" + source = "terraform-aws-modules/eks/aws" + version = "20.0.0" cluster_name = local.cluster_name cluster_version = "1.32" - enable_irsa = true - vpc_id = local.vpc_id - subnet_ids = local.private_subnet_ids - control_plane_subnet_ids = local.private_subnet_ids + enable_irsa = true + vpc_id = local.vpc_id + subnet_ids = local.private_subnet_ids + control_plane_subnet_ids = local.private_subnet_ids enable_cluster_creator_admin_permissions = true // Enable Control Plane Logging - cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] // Enabled Cluster encryption cluster_encryption_config = { - provider_key_arn = aws_kms_key.eks.arn - resources = ["secrets"] - } + provider_key_arn = aws_kms_key.eks.arn + resources = ["secrets"] + } // Cluster endpoint should not have public access cluster_endpoint_private_access = false @@ -66,13 +76,13 @@ module "eks" { } self_managed_node_groups = { - (local.cluster_name) = { - ami_id = data.aws_ami.eks_ami.id - instance_type = var.node_config.node_type - desired_size = var.node_config.min_count - min_size = var.node_config.min_count - max_size = var.node_config.max_count - bootstrap_extra_args = "--container-runtime containerd" + "${local.cluster_name}" = { + ami_id = data.aws_ami.eks_ami.id + instance_type = var.node_config.node_type + desired_size = var.node_config.min_count + min_size = var.node_config.min_count + max_size = var.node_config.max_count + bootstrap_extra_args = "--container-runtime containerd" # vpc_security_group_ids = var.internal_loadbalancer ? [aws_security_group.worker_group_mgmt.id] : [aws_security_group.external_worker_group_mgmt.id] # target_group_arns = var.public_ingress ? [aws_lb_target_group.cluster_tg.0.arn,aws_lb_target_group.kong_tg_admin.0.arn] : (var.public_app ? [aws_lb_target_group.cluster_alb_tg.0.arn] : [aws_lb_target_group.cluster_nlb_tg.0.arn]) # user_data_template_path = file("./templates/user-data.tpl") @@ -84,8 +94,8 @@ module "eks" { } tags = merge(local.common_tags, - tomap({ - "Name" = local.cluster_name - }) + tomap({ + "Name" = local.cluster_name + }) ) } diff --git a/k8s/aws/eks/observability.tf b/k8s/aws/eks/observability.tf index d81b4239..e5379955 100644 --- a/k8s/aws/eks/observability.tf +++ b/k8s/aws/eks/observability.tf @@ -6,15 +6,15 @@ locals { } module "observability" { - count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1 : 0 + count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 
1: 0 source = "../../../observability/aws" app_name = var.app_name app_region = var.app_region app_env = var.app_env observability_suffix = var.observability_config.suffix - access_key = aws_iam_access_key.observability_s3_user[0].id - access_secret = aws_iam_access_key.observability_s3_user[0].secret + access_key = aws_iam_access_key.observability_s3_user.0.id + access_secret = aws_iam_access_key.observability_s3_user.0.secret domain_name = local.domain_name cluster_name = local.cluster_name loki = var.observability_config.loki @@ -25,7 +25,7 @@ module "observability" { } resource "aws_iam_policy" "observability_s3_iam_policy" { - count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1 : 0 + count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1: 0 name = "observability-${local.environment}-policy" description = "IAM policy for Observability Cluster to access S3" @@ -33,9 +33,9 @@ resource "aws_iam_policy" "observability_s3_iam_policy" { Version = "2012-10-17", Statement = [ { - "Sid" : "AllObjectActions", - "Effect" : "Allow", - "Action" : [ + "Sid": "AllObjectActions", + "Effect": "Allow", + "Action": [ "s3:PutObject", "s3:GetObject", "s3:DeleteObject" @@ -45,24 +45,24 @@ resource "aws_iam_policy" "observability_s3_iam_policy" { ] }, { - "Sid" : "BucketManagement", - "Effect" : "Allow", - "Action" : [ + "Sid": "BucketManagement", + "Effect": "Allow", + "Action": [ "s3:CreateBucket", "s3:ListBucket" ], Resource = "arn:aws:s3:::${local.cluster_name}-*-${var.observability_config.suffix}" }, { - "Sid" : "ListAllBuckets", - "Effect" : "Allow", - "Action" : "s3:ListAllMyBuckets", - "Resource" : "*" + "Sid": "ListAllBuckets", + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" }, { - "Sid" : "DynamoDB", - "Effect" : "Allow", - "Action" : [ + "Sid": "DynamoDB", + "Effect": "Allow", + "Action": [ "dynamodb:CreateTable", "dynamodb:DescribeTable", "dynamodb:Query", @@ -71,43 +71,43 @@ resource "aws_iam_policy" "observability_s3_iam_policy" { "dynamodb:ListTables", "dynamodb:ListTagsOfResource" ], - "Resource" : "*" + "Resource": "*" } ] }) } resource "aws_iam_user" "observability_s3_user" { - count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1 : 0 - name = "${local.cluster_name}-s3-user" - tags = local.common_tags + count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1: 0 + name = "${local.cluster_name}-s3-user" + tags = local.common_tags } resource "aws_iam_user_policy_attachment" "observability_s3_attach" { - count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1 : 0 - user = aws_iam_user.observability_s3_user[0].name - policy_arn = aws_iam_policy.observability_s3_iam_policy[0].arn + count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1: 0 + user = aws_iam_user.observability_s3_user.0.name + policy_arn = aws_iam_policy.observability_s3_iam_policy.0.arn } -resource "aws_iam_access_key" "observability_s3_user" { - count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1 : 0 - user = aws_iam_user.observability_s3_user[0].name +resource "aws_iam_access_key" "observability_s3_user"{ + count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 
1: 0 + user = aws_iam_user.observability_s3_user.0.name } resource "aws_secretsmanager_secret" "observability_s3_user" { - count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1 : 0 + count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1: 0 name = "${local.cluster_name}-s3-user-secret-key" } -resource "aws_secretsmanager_secret_version" "observability_s3_user" { - count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1 : 0 - secret_id = aws_secretsmanager_secret.observability_s3_user[0].id - secret_string = jsonencode({ username = aws_iam_user.observability_s3_user[0].name, access_key = aws_iam_access_key.observability_s3_user[0].user, - access_secret = aws_iam_access_key.observability_s3_user[0].secret }) +resource "aws_secretsmanager_secret_version" "observability_s3_user"{ + count = (local.enable_cortex || local.enable_loki || local.enable_tempo || local.enable_mimir) ? 1: 0 + secret_id = aws_secretsmanager_secret.observability_s3_user.0.id + secret_string = jsonencode({ username = aws_iam_user.observability_s3_user.0.name, access_key = aws_iam_access_key.observability_s3_user.0.user, + access_secret = aws_iam_access_key.observability_s3_user.0.secret }) } resource "kubernetes_service" "db_service" { - count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 1 : 0, 0) + count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 1 : 0, 0) metadata { name = "monitoring-rds" namespace = "db" diff --git a/k8s/aws/eks/outputs.tf b/k8s/aws/eks/outputs.tf index 9d2721c2..0cc3907f 100644 --- a/k8s/aws/eks/outputs.tf +++ b/k8s/aws/eks/outputs.tf @@ -46,7 +46,7 @@ output "cluster_security_group_id" { value = module.eks.cluster_security_group_id } -output "k8s_token" { +output "k8s_token"{ value = data.aws_eks_cluster_auth.cluster.token sensitive = true } @@ -55,8 +55,8 @@ output "k8s_ca" { value = module.eks.cluster_certificate_authority_data } -output "oidc_role" { - value = module.iam_assumable_role_admin.this_iam_role_arn +output "oidc_role"{ + value = module.iam_assumable_role_admin.this_iam_role_arn } output "oidc_issuer_url" { @@ -64,7 +64,7 @@ output "oidc_issuer_url" { } output "kafka_host" { - value = try(jsondecode(data.aws_secretsmanager_secret_version.kafka_secert_version[0].secret_string).kafka_host, "null") + value = try(jsondecode(data.aws_secretsmanager_secret_version.kafka_secert_version.0.secret_string).kafka_host, "null") sensitive = true } @@ -74,7 +74,7 @@ output "kafka_password" { } output "kafka_username" { - value = try(jsondecode(data.aws_secretsmanager_secret_version.kafka_secert_version[0].secret_string).username, "null") + value = try(jsondecode(data.aws_secretsmanager_secret_version.kafka_secert_version.0.secret_string).username, "null") sensitive = true } @@ -107,7 +107,7 @@ output "public_subnets_id" { } output "mimir_host_url" { - value = try(module.observability[0].mimir_host_url, "") + value = try(module.observability[0].mimir_host_url,"") } output "mimir_basic_auth_username" { @@ -123,7 +123,7 @@ output "mimir_basic_auth_password" { } output "loki_host_url" { - value = try(module.observability[0].loki_host_url, "") + value = try(module.observability[0].loki_host_url,"") } output "cluster_uid" { @@ -131,16 +131,16 @@ output "cluster_uid" { } output "tempo_host_url" { - value = try(module.observability[0].tempo_host_url, "") + value =try( 
module.observability[0].tempo_host_url,"") } output "cortex_host_url" { - value = try(module.observability[0].cortex_host_url, "") + value = try(module.observability[0].cortex_host_url,"") } output "grafana_password" { sensitive = true - value = try(random_password.observability_admin[0].result, "") + value = try(random_password.observability_admin[0].result,"") } output "grafana_admin" { @@ -148,14 +148,14 @@ output "grafana_admin" { } output "grafana_host" { - value = try(local.grafana_host, "") + value = try(local.grafana_host,"") } output "grafana_datasources" { - value = local.grafana_datasource_list + value = local.grafana_datasource_list sensitive = true } output "lbip" { - value = data.kubernetes_service.ingress-controller.status[0].load_balancer[0].ingress[0].hostname + value = data.kubernetes_service.ingress-controller.status.0.load_balancer.0.ingress.0.hostname } \ No newline at end of file diff --git a/k8s/aws/eks/prometheus.tf b/k8s/aws/eks/prometheus.tf index 7bcf0c30..a68aa3ac 100644 --- a/k8s/aws/eks/prometheus.tf +++ b/k8s/aws/eks/prometheus.tf @@ -20,22 +20,22 @@ resource "kubernetes_secret" "prometheus_remote_write_auth" { type = "Opaque" } -locals { +locals{ # this is namespace level alerts: - namespace_teams_webhook = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "teams" } if s.alert_webhooks != null]...) - namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = v.data, labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "google_chat" } if s.alert_webhooks != null]...) + namespace_teams_webhook = merge([for n, s in var.app_namespaces : {for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "teams"} if s.alert_webhooks != null]...) + namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = v.data, labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "google_chat"}if s.alert_webhooks != null]...) # this is cluster level alerts: - cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data, 8, length(val.data)), labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "teams" } - cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "moogsoft" } - cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? 
{} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "pagerduty" } - cluster_google_chat_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "google_chat" } - cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => { url = val.url, channel = val.channel, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } + cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data,8 ,length(val.data) ),labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "teams"} + cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "moogsoft"} + cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "pagerduty"} + cluster_google_chat_alerts= jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => {data = val.data, labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "google_chat"} + cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => {url = val.url, channel = val.channel,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, }} - cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) - cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) - cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => { url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } - google_chat_alerts = merge(local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) + cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) + cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) + cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => {url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? 
{severity = "critical", servicealert = "true"} : val.labels, }} + google_chat_alerts = merge( local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) # Create secrets for user-provided remote write configs with basic auth prometheus_remote_write_secrets = try(var.observability_config.prometheus.remote_write, null) != null ? { @@ -55,9 +55,9 @@ locals { ] : [] default_remote_write_config = local.enable_mimir && local.prometheus_enable ? [{ - host = "http://mimir-distributor.mimir:8080/api/v1/push" - key = "X-Scope-OrgID" - value = random_uuid.grafana_standard_datasource_header_value.result + host = "http://mimir-distributor.mimir:8080/api/v1/push" + key = "X-Scope-OrgID" + value = random_uuid.grafana_standard_datasource_header_value.result secret_name = null }] : [] @@ -68,31 +68,31 @@ data "template_file" "prom_template" { count = local.prometheus_enable ? 1 : 0 template = file("./templates/prometheus-values.yaml") - vars = { - PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") - PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? var.observability_config.prometheus.persistence.retention_size : "45GB", "45GB") - PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "10d", "10d") - REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) - CLUSTER_NAME = local.cluster_name - ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false - MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true - MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? false : true - MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) - MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key - MOOGSOFT_USERNAME = var.moogsoft_username - teams_webhook_alerts = jsonencode(local.cluster_alerts) - cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) - cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) - GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true - GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) - PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true - PAGER_DUTY_KEY = var.pagerduty_integration_key - PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) - GRAFANA_HOST = local.grafana_enable ? local.grafana_host : "" - SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true - WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true - SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) - WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) + vars = { + PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") + PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? 
var.observability_config.prometheus.persistence.retention_size : "45GB", "45GB") + PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "10d", "10d") + REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) + CLUSTER_NAME = local.cluster_name + ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false + MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true + MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? false : true + MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) + MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key + MOOGSOFT_USERNAME = var.moogsoft_username + teams_webhook_alerts = jsonencode(local.cluster_alerts) + cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) + cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) + GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true + GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) + PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true + PAGER_DUTY_KEY = var.pagerduty_integration_key + PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) + GRAFANA_HOST = local.grafana_enable ? local.grafana_host : "" + SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true + WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true + SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) + WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) } } @@ -101,7 +101,7 @@ resource "helm_release" "prometheus" { chart = "kube-prometheus-stack" name = "prometheus" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name create_namespace = true version = try(var.observability_config.prometheus.version != null ? var.observability_config.prometheus.version : "60.0.0", "60.0.0") @@ -130,20 +130,20 @@ resource "helm_release" "alerts_teams" { data "template_file" "cluster-alerts" { template = file("./templates/cluster-level-alerts.yaml") - vars = { + vars = { cluster_memory_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_underutilisation != null ? var.cluster_alert_thresholds.memory_underutilisation : 20) - cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) - cluster_node_count_max_value = var.node_config.max_count - cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) - cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count : 80) - cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? 
var.cluster_alert_thresholds.cpu_utilisation : 80) - cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation : 20) - cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization : 80) - cluster_name = local.cluster_name - cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) - nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold : 5) - cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) - prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) + cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) + cluster_node_count_max_value = var.node_config.max_count + cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) + cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count: 80) + cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? var.cluster_alert_thresholds.cpu_utilisation: 80) + cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation: 20) + cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization: 80) + cluster_name = local.cluster_name + cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) + nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold: 5) + cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) + prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? 
var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) } } diff --git a/k8s/aws/eks/vars.tf b/k8s/aws/eks/vars.tf index 361e6ec0..ca13721c 100644 --- a/k8s/aws/eks/vars.tf +++ b/k8s/aws/eks/vars.tf @@ -30,46 +30,60 @@ variable "subnets" { }) } +variable "user_access" { + description = "List of users who will have access to clusters" + type = object({ + app_admins = optional(list(string)) + app_viewers = optional(list(string)) + app_editors = optional(list(string)) + }) + default = { + app_admins = [] + app_viewers = [] + app_editors = [] + } +} + variable "app_namespaces" { description = "List of envs and respective users who will have access to edit non system resources in this cluster" - type = map(object({ - alert_webhooks = optional(list(object({ - type = string - data = string - labels = optional(map(string)) + type = map(object({ + alert_webhooks = optional(list(object({ + type = string + data = string + labels = optional(map(string)) }))) - admins = optional(list(string)) - editors = optional(list(string)) - viewers = optional(list(string)) + admins = optional(list(string)) + editors = optional(list(string)) + viewers = optional(list(string)) })) - default = {} + default = {} } variable "node_config" { description = "List of values for the node configuration of kubernetes cluster" - type = object({ - node_type = string - min_count = number - max_count = number + type = object({ + node_type = string + min_count = number + max_count = number }) validation { - condition = (var.node_config.min_count > 0) + condition = (var.node_config.min_count > 0) error_message = "The variable kube_node_count_min must be greater than 0." } validation { - condition = (var.node_config.max_count < 30) + condition = (var.node_config.max_count < 30) error_message = "The variable kube_node_count_max value must less than 30." } } variable "cluster_alert_webhooks" { description = "details for setting up of different types of alerts." - type = list(object({ - type = string - data = string + type = list(object({ + type = string + data = string labels = optional(map(string)) })) - default = [] + default = [] # example variable # cluster_alert_webhooks = [ @@ -141,12 +155,36 @@ variable "moogsoft_username" { default = "" } +variable "Kong_enterprise_enabled" { + description = "kong enterprise enabled. Default false" + type = bool + default = false +} + +variable "kong_whitelist_cidr" { + description = "Whitelist cidr address for kong manager,Grafana and admin api." 
+ type = string + default = "103.245.47.20/32" +} + variable "custom_inbound_ip_range" { description = "list of custom ip range that are allowed access to services on EKS cluster" type = list default = [] } +variable "kafka" { + description = "Map for kafka input" + type = map( + object( + { + topics = list(string) + } + ) + ) + default = {} +} + variable "public_app" { description = "whether application deploy on public ALB on port 80" type = bool @@ -159,43 +197,77 @@ variable "public_app" { # default = true #} +variable "kong_config" { + description = "Map for consumers" + type = any + default = {} +# kong_config = { +# consumer_list = { +# default = [ +# { +# name = "test1001" +# custom_id = "1001" +# group_list = ["partner-group"] +# } +# ] +# } +# acl_allow_list = { +# default = [ +# { +# name = "partner-acl" +# allow_list = ["partner-group"] +# } +# ] +# } +# custom_domains = [ +# { +# url = "" (required) +# hosted_zone_id = "" (required) +# create_acm_cert = optional (true) +# update_route53_record = false (optional Default : true) +# acm_cert_arn = "" (optional Default : "") +# subject_alternative_names= [] (optional Default : []) +# } +# ] +# } +} variable "worker_ami_config" { description = "Object of worker_ami_config inputs" - type = object({ + type = object({ owner_id = string name = string }) default = { - owner_id = "amazon" - name = "amazon-eks-node-1.32-v20250804" + owner_id = "amazon" + name = "amazon-eks-node-1.32-v20250804" } } variable "cluster_alert_thresholds" { description = "Cluster related configuration." type = object({ - cpu_utilisation = optional(number) - cpu_underutilisation = optional(number) - node_count = optional(number) - memory_utilisation = optional(number) - memory_underutilisation = optional(number) - pod_count = optional(number) - nginx_5xx_percentage_threshold = optional(number) - disk_utilization = optional(number) - cortex_disk_utilization_threshold = optional(number) + cpu_utilisation = optional(number) + cpu_underutilisation = optional(number) + node_count = optional(number) + memory_utilisation = optional(number) + memory_underutilisation = optional(number) + pod_count = optional(number) + nginx_5xx_percentage_threshold = optional(number) + disk_utilization = optional(number) + cortex_disk_utilization_threshold = optional(number) prometheus_disk_utilization_threshold = optional(number) }) - default = { - cpu_utilisation = 80 - cpu_underutilisation = 20 - node_count = 80 - memory_utilisation = 80 - memory_underutilisation = 20 - pod_count = 80 - nginx_5xx_percentage_threshold = 5 - disk_utilization = 20 - cortex_disk_utilization_threshold = 80 + default = { + cpu_utilisation = 80 + cpu_underutilisation = 20 + node_count = 80 + memory_utilisation = 80 + memory_underutilisation = 20 + pod_count = 80 + nginx_5xx_percentage_threshold = 5 + disk_utilization = 20 + cortex_disk_utilization_threshold = 80 prometheus_disk_utilization_threshold = 80 } } @@ -206,27 +278,76 @@ variable "ext_rds_sg_cidr_block" { default = ["10.0.0.0/8"] } +variable "rds_local_access" { + description = "whether RDS needs to be allowed to access from local" + type = bool + default = false +} + +variable "custom_secrets_name_list" { + description = " list of aws secrets that were manually created by prefixing cluster name and environment " + type = map( + object( + { + secrets = list(string) + } + ) + ) + default = {} +} + +variable "ingress_custom_domain" { + description = "Map for k8 ingress for custom domain." 
+ type = map(any) + default = {} + # below is example value + # ingress_custom_domain = { + # acme = [{ ---> namespace + # service = "acme-challenge" ---> service name + # domain = "*.test1.shgw.link" ---> custom domain name + # name = "shgw.link" ---> this should be unique name + # }] + # } +} + variable "pagerduty_integration_key" { description = "Pagerduty Integration key to send data to Pagerduty" type = string default = "" } +variable "cluster_config" { + description = "Configurations on Cluster" + type = map(any) + default = {} +} + +variable "provider_id" { + description = "profile name" + type = string +} + +variable "location" { + description = "location" + type = string + default = "us-west-2" +} + variable "observability_config" { description = "All the configuration related to observability(e.g prometheus, grafana, loki, tempo and cortex)" - type = object({ - suffix = optional(string) + type = object({ + suffix = optional(string) prometheus = optional(object({ - version = optional(string) - enable = bool - persistence = optional(object({ + version = optional(string) + enable = bool + persistence = optional(object({ disk_size = optional(string) retention_size = optional(string) retention_duration = optional(string) })) remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -235,14 +356,14 @@ variable "observability_config" { }))) })) grafana = optional(object({ - version = optional(string) - enable = bool - url = optional(string) - min_replica = optional(number) - max_replica = optional(number) - request_memory = optional(string) - request_cpu = optional(string) - dashboard = optional(object({ + version = optional(string) + enable = bool + url = optional(string) + min_replica = optional(number) + max_replica = optional(number) + request_memory = optional(string) + request_cpu = optional(string) + dashboard = optional(object({ limit_memory = optional(string) limit_cpu = optional(string) request_memory = optional(string) @@ -255,8 +376,8 @@ variable "observability_config" { request_cpu = optional(string) })) persistence = optional(object({ - type = optional(string) - disk_size = optional(string) + type = optional(string) + disk_size = optional(string) deletion_protection = optional(string) })) configs = optional(object({ @@ -269,19 +390,19 @@ variable "observability_config" { enable = bool log_level = optional(string) max_event_age_second = optional(string) - loki_receivers = optional(list(object({ - name = string - url = string + loki_receivers = optional(list(object({ + name = string + url = string header = optional(object({ key = string value = string })) cluster_id = optional(string) }))) - webhook_receivers = optional(list(object({ - name = string - type = string - url = string + webhook_receivers = optional(list(object({ + name = string + type = string + url = string header = optional(object({ key = string value = string @@ -295,20 +416,20 @@ variable "observability_config" { })) })) loki = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = 
optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -361,21 +482,21 @@ variable "observability_config" { })) })) cortex = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_series_per_metric = optional(number) - max_series_per_user = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_series_per_metric = optional(number) + max_series_per_user = optional(number) max_fetched_chunks_per_query = optional(number) })) query_range = optional(object({ memcached_client_timeout = optional(string) })) compactor = optional(object({ - enable = optional(bool) - replicas = optional(number) + enable = optional(bool) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -385,8 +506,8 @@ variable "observability_config" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) @@ -413,7 +534,7 @@ variable "observability_config" { })) query_frontend = optional(object({ replicas = optional(number) - enable = optional(bool) + enable = optional(bool) })) store_gateway = optional(object({ replication_factor = optional(number) @@ -469,14 +590,14 @@ variable "observability_config" { })) })) mimir = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ ingestion_rate = optional(number) @@ -486,7 +607,7 @@ variable "observability_config" { max_outstanding_requests_per_tenant = optional(number) })) compactor = optional(object({ - replicas = optional(number) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -496,22 +617,22 @@ variable "observability_config" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = 
optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) query_frontend = optional(object({ replicas = optional(number) @@ -528,30 +649,30 @@ variable "observability_config" { max_memory = optional(string) })) distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) })) })) tempo = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - ingester_bytes_received = optional(number) - distributor_ingester_appends = optional(number) + ingester_bytes_received = optional(number) + distributor_ingester_appends = optional(number) distributor_ingester_append_failures = optional(number) - ingester_live_traces = optional(number) - distributor_spans_received = optional(number) - distributor_bytes_received = optional(number) - ingester_blocks_flushed = optional(number) - tempodb_blocklist = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + ingester_live_traces = optional(number) + distributor_spans_received = optional(number) + distributor_bytes_received = optional(number) + ingester_blocks_flushed = optional(number) + tempodb_blocklist = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) max_receiver_msg_size = optional(number) ingester = optional(object({ @@ -580,10 +701,10 @@ variable "observability_config" { })) querier = optional(object({ - replicas = optional(number) + replicas = optional(number) })) query_frontend = optional(object({ - replicas = optional(number) + replicas = optional(number) })) metrics_generator = optional(object({ enable = optional(bool) @@ -591,9 +712,9 @@ variable "observability_config" { service_graphs_max_items = optional(number) service_graphs_wait = optional(string) remote_write_flush_deadline = optional(string) - remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -607,13 +728,19 @@ variable "observability_config" { variable "namespace_folder_list" { description = "List of Namespaces configured in the cluster" - type = list(string) - default = [] + type = list(string) + default = [] +} + +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" } variable "standard_tags" { description = "standard tags for resources" - type = object({ + type = object ({ project = optional(string) provisioner = optional(string) }) @@ -622,10 +749,10 @@ variable "standard_tags" { variable "fluent_bit" { description = "Inputs for Fluent Bit configurations" - type = object({ - enable = string + type = object({ + enable = string cloud_watch_enable = string - loki = optional(list(object({ + loki = 
optional(list(object({ host = string tenant_id = optional(string) labels = string @@ -633,9 +760,9 @@ variable "fluent_bit" { tls = optional(string) }))) http = optional(list(object({ - host = string - port = optional(number) - uri = optional(string) + host = string + port = optional(number) + uri = optional(string) headers = optional(list(object({ key = string value = string @@ -651,24 +778,24 @@ variable "fluent_bit" { tls_verify = optional(string) }))) datadog = optional(list(object({ - host = string - api_key = string - tls = optional(string) - compress = optional(string) + host = string + api_key = string + tls = optional(string) + compress = optional(string) }))) new_relic = optional(list(object({ - host = optional(string) - api_key = string - compress = optional(string) + host = optional(string) + api_key = string + compress = optional(string) }))) - slack = optional(list(object({ - webhook = string + slack = optional(list(object({ + webhook = string }))) }) default = null } -variable "cert_issuer_config" { +variable "cert_issuer_config"{ description = "email to be added as cert-manager issuer" type = object({ env = optional(string) @@ -688,10 +815,10 @@ variable "slack_alerts_configs" { variable "webhook_alerts_configs" { type = list(object({ - name = string - url = string + name = string + url = string send_resolved = optional(bool, true) - labels = optional(map(string)) + labels = optional(map(string)) })) default = [] } @@ -712,13 +839,13 @@ variable "karpenter_configs" { description = "Inputs for karpenter - enabling flag, GCP machine types, and capacity types ('on-demand' or 'spot')" type = object({ - enable = bool - machine_types = list(string) + enable = bool + machine_types = list(string) capacity_types = list(string) }) default = { - enable = false - machine_types = [] + enable = false + machine_types = [] capacity_types = [] } diff --git a/k8s/aws/namespace/badger-db.tf b/k8s/aws/namespace/badger-db.tf index 7d6087da..68cfef4a 100644 --- a/k8s/aws/namespace/badger-db.tf +++ b/k8s/aws/namespace/badger-db.tf @@ -1,7 +1,7 @@ locals { badger_db_volume_mounts_services = tomap({ for k, v in var.services : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) @@ -9,7 +9,7 @@ locals { badger_db_volume_mounts_crons = tomap({ for k, v in var.cron_jobs : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) diff --git a/k8s/aws/namespace/db.tf b/k8s/aws/namespace/db.tf index 1407907d..eff58e0d 100644 --- a/k8s/aws/namespace/db.tf +++ b/k8s/aws/namespace/db.tf @@ -1,22 +1,26 @@ locals { cluster_name = var.app_env == "" ? var.app_name : "${var.app_name}-${var.app_env}" - vpc_id = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].vpc_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].vpc_id : module.remote_state_azure_cluster[0].vpc_id) + vpc_id = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].vpc_id : (var.shared_services.type == "gcp" ? 
module.remote_state_gcp_cluster[0].vpc_id : module.remote_state_azure_cluster[0].vpc_id) + cluster_name_parts = split("-", local.cluster_name) + environment = var.app_env - enable_db = try(var.sql_db.enable, false) - db_list = distinct(concat(distinct([for key, value in var.services : value.db_name]), distinct([for key, value in var.cron_jobs : value.db_name]))) + enable_db = try(var.sql_db.enable, false) + db_list = distinct(concat(distinct([for key, value in var.services: value.db_name]), distinct([for key, value in var.cron_jobs: value.db_name]))) - common_tags = merge(var.common_tags, + common_tags = merge(var.common_tags, tomap({ - project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name, local.cluster_name) + project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name ,local.cluster_name) provisioner = try(var.standard_tags.provisioner != null ? var.standard_tags.provisioner : "zop-dev", "zop-dev") - })) + })) - ext_rds_sg_cidr_block = concat([data.aws_vpc.vpc.cidr_block], var.ext_rds_sg_cidr_block) + ext_rds_sg_cidr_block = concat([data.aws_vpc.vpc.cidr_block], var.ext_rds_sg_cidr_block) - db_subnets_ids = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].all_outputs.db_subnets_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].all_outputs.db_subnets_id : module.remote_state_azure_cluster[0].all_outputs.db_subnets_id) + subnet_ids = concat(local.db_subnets_ids,local.private_subnets_ids) + private_subnets_ids = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].all_outputs.private_subnets_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].all_outputs.private_subnets_id : module.remote_state_azure_cluster[0].all_outputs.private_subnets_id) + db_subnets_ids = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].all_outputs.db_subnets_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].all_outputs.db_subnets_id : module.remote_state_azure_cluster[0].all_outputs.db_subnets_id) - subnet_cidrs = concat(local.db_subnets_cidrs, local.private_subnets_cidrs) + subnet_cidrs = concat(local.db_subnets_cidrs,local.private_subnets_cidrs) private_subnets_cidrs = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].private_subnets : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].private_subnets : module.remote_state_azure_cluster[0].private_subnets) db_subnets_cidrs = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].db_subnets : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].db_subnets : module.remote_state_azure_cluster[0].db_subnets) @@ -24,16 +28,16 @@ locals { { for service_key, service_value in var.services : service_value.datastore_configs.name => [ - service_value.datastore_configs.databse - ]... + service_value.datastore_configs.databse + ]... if try(service_value.datastore_configs.name, null) != null && try(service_value.datastore_configs.databse, null) != null }, { for cron_key, cron_value in var.cron_jobs : cron_value.datastore_configs.name => [ - cron_value.datastore_configs.databse - ]... + cron_value.datastore_configs.databse + ]... 
if try(cron_value.datastore_configs.name, null) != null && try(cron_value.datastore_configs.databse, null) != null } @@ -52,13 +56,13 @@ data "aws_vpc" "vpc" { module "rds" { - source = "../../../sql/aws-rds" - cluster_name = local.cluster_name - namespace = kubernetes_namespace.app_environments.metadata[0].name + source = "../../../sql/aws-rds" + cluster_name = local.cluster_name + namespace = kubernetes_namespace.app_environments.metadata[0].name - count = local.enable_db == false ? 0 : 1 + count = local.enable_db == false ? 0 : 1 - enable_ssl = try(var.sql_db.enable_ssl, false) + enable_ssl = try(var.sql_db.enable_ssl,false) aws_region = var.app_region db_subnets = local.db_subnets_ids vpc_id = local.vpc_id @@ -74,19 +78,19 @@ module "rds" { read_replica_multi_az = var.sql_db.multi_az != null ? (var.sql_db.multi_az == true && var.sql_db.read_replica_multi_az != null ? var.sql_db.read_replica_multi_az : false) : false deletion_protection = var.sql_db.deletion_protection != null ? var.sql_db.deletion_protection : true apply_immediately = var.sql_db.apply_changes_immediately != null ? var.sql_db.apply_changes_immediately : false - max_allocated_storage = var.sql_db.rds_max_allocated_storage != null ? var.sql_db.rds_max_allocated_storage : (var.sql_db.disk_size == null ? 200 : (var.sql_db.disk_size >= 200 ? var.sql_db.disk_size + 100 : 200)) + max_allocated_storage = var.sql_db.rds_max_allocated_storage != null ? var.sql_db.rds_max_allocated_storage : ( var.sql_db.disk_size == null ? 200 : ( var.sql_db.disk_size >= 200 ? var.sql_db.disk_size + 100 : 200)) monitoring_interval = try(var.sql_db.monitoring_interval != null ? var.sql_db.monitoring_interval : 0) - log_min_duration_statement = var.sql_db.log_min_duration_statement != null ? var.sql_db.log_min_duration_statement : -1 + log_min_duration_statement = var.sql_db.log_min_duration_statement != null ? var.sql_db.log_min_duration_statement : -1 storage_tier = var.sql_db.storage_tier != null ? var.sql_db.storage_tier : "gp3" postgresql_engine_version = var.sql_db.engine_version != null ? var.sql_db.engine_version : "16.3" - tags = local.common_tags + tags = local.common_tags } resource "kubernetes_service" "db_service" { - count = var.sql_db == null ? 0 : 1 + count = var.sql_db == null ? 0 : 1 metadata { name = "${var.namespace}-rds" namespace = "db" @@ -101,6 +105,11 @@ resource "kubernetes_service" "db_service" { } locals { + additonal_secrets_map = tomap({ + for secret_key in var.custom_namespace_secrets : "${var.namespace}-${secret_key}"=> { + namespace = var.namespace + } + }) } module "rds_v2" { diff --git a/k8s/aws/namespace/kubernetes.tf b/k8s/aws/namespace/kubernetes.tf index 458a8fe6..f59f8366 100644 --- a/k8s/aws/namespace/kubernetes.tf +++ b/k8s/aws/namespace/kubernetes.tf @@ -1,21 +1,22 @@ locals { cluster_prefix = var.shared_services.cluster_prefix != null ? var.shared_services.cluster_prefix : "${var.provider_id}/${var.app_env}/${var.app_name}" + oidc_role = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].oidc_role : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].oidc_role : module.remote_state_azure_cluster[0].oidc_role) } module "remote_state_gcp_cluster" { - source = "../../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = local.cluster_prefix + source = "../../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 
1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = local.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../../remote-state/aws" - count = var.shared_services.type == "aws" ? 1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = local.cluster_prefix - location = var.shared_services.location + source = "../../../remote-state/aws" + count = var.shared_services.type == "aws" ? 1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = local.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { @@ -37,7 +38,7 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -45,7 +46,7 @@ provider "kubernetes" { provider "kubectl" { load_config_file = false host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -53,7 +54,7 @@ provider "kubectl" { provider "helm" { kubernetes { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } } \ No newline at end of file diff --git a/k8s/aws/namespace/nginx.tf b/k8s/aws/namespace/nginx.tf index a05a91c1..bb944399 100644 --- a/k8s/aws/namespace/nginx.tf +++ b/k8s/aws/namespace/nginx.tf @@ -1,13 +1,13 @@ locals { - domain_name = try(var.accessibility.domain_name != null ? var.accessibility.domain_name : "", "") + domain_name = try(var.accessibility.domain_name != null ? var.accessibility.domain_name : "", "") default_domain_list = merge([ for service, service_config in var.services : { - (service) = { - ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] + "${ service }" = { + ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] basic_auth = (service_config.enable_basic_auth != null ? service_config.enable_basic_auth : false) ? true : false } - } if(coalesce(var.services[service].enable_default_ingress, false) == true) + } if (coalesce(var.services[service].enable_default_ingress, false) == true) ]...) service_custom_domain_list = merge([ @@ -23,12 +23,12 @@ locals { } # Exclude wildcard hosts from custom host logic if !can(regex("^\\*\\.", split("/", host)[0])) - }) if try(length(var.services[service].ingress_list), 0) != 0 + })if try(length(var.services[service].ingress_list),0) != 0 ]...) default_services_list = merge([ for service in keys(local.default_domain_list) : { - for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { + for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 
80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen @@ -43,54 +43,54 @@ locals { wildcard_custom_hosts = merge([ for service, config in var.services : tomap({ for host in try(config.ingress_list, []) : - "${service}-${var.namespace}-${host}" => { - service_name = split(":", service)[0] - service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] - ingress_host = split("/", host)[0] - ns = var.namespace - ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) - base_domain = replace(split("/", host)[0], "*.", "") - } - if can(regex("^\\*\\.", split("/", host)[0])) + "${service}-${var.namespace}-${host}" => { + service_name = split(":", service)[0] + service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] + ingress_host = split("/", host)[0] + ns = var.namespace + ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) + base_domain = replace(split("/", host)[0], "*.", "") + } + if can(regex("^\\*\\.", split("/", host)[0])) }) if try(length(config.ingress_list), 0) != 0 ]...) } resource "random_password" "basic_auth_password" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } length = 32 special = true override_special = "_@" } resource "random_string" "basic_auth_user_name_suffix" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - length = 6 - special = true - upper = false - numeric = false + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + length = 6 + special = true + upper = false + numeric = false min_special = 2 - lower = true + lower = true } resource "aws_secretsmanager_secret" "basic_auth_credentials" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } name = "${local.cluster_name}-${var.namespace}-${each.key}-basic-auth-credentials" tags = local.common_tags } resource "aws_secretsmanager_secret_version" "basic_auth_credentials" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - secret_id = aws_secretsmanager_secret.basic_auth_credentials[each.key].id + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + secret_id = aws_secretsmanager_secret.basic_auth_credentials[each.key].id secret_string = jsonencode({ user_name = "${each.key}-${random_string.basic_auth_user_name_suffix[each.key].result}", - password = random_password.basic_auth_password[each.key].result }) + password = random_password.basic_auth_password[each.key].result }) } resource "kubernetes_secret_v1" "basic_auth_secret" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? 
v.enable_basic_auth : false} metadata { - name = "${each.key}-basic-auth" + name = "${each.key}-basic-auth" namespace = var.namespace } data = { @@ -100,15 +100,15 @@ resource "kubernetes_secret_v1" "basic_auth_secret" { } resource "kubernetes_ingress_v1" "default_service_ingress" { - for_each = { for service, value in local.default_services_list : service => value } + for_each = {for service, value in local.default_services_list : service => value } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -137,17 +137,17 @@ resource "kubernetes_ingress_v1" "default_service_ingress" { } resource "kubernetes_ingress_v1" "custom_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "cert-manager.io/issuer" = "letsencrypt" - "kubernetes.io/tls-acme" = "true" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "cert-manager.io/issuer" = "letsencrypt" + "kubernetes.io/tls-acme" = "true" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -168,15 +168,15 @@ resource "kubernetes_ingress_v1" "custom_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] } resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } metadata { name = each.value.ingress_name namespace = each.value.ns @@ -186,9 +186,9 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { "kubernetes.io/tls-acme" = "true" "nginx.ingress.kubernetes.io/use-regex" = "true" "nginx.ingress.kubernetes.io/rewrite-target" = "/$2" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" - "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? 
"Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -209,8 +209,8 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] diff --git a/k8s/aws/namespace/outputs.tf b/k8s/aws/namespace/outputs.tf index 08741d4e..34081fa0 100644 --- a/k8s/aws/namespace/outputs.tf +++ b/k8s/aws/namespace/outputs.tf @@ -1,5 +1,5 @@ output "k8s_ca" { - value = data.aws_eks_cluster.cluster.certificate_authority[0].data + value = data.aws_eks_cluster.cluster.certificate_authority.0.data } output "cluster_endpoint" { @@ -28,14 +28,14 @@ output "service_configs" { value = { for k, v in var.services : k => { - db_name = v.db_name != null ? v.db_name : "" - db_secret_name = v.db_name != null ? "${local.cluster_name}-${var.namespace}-${v.db_name}-db-user-secret" : "" - db_read_only_secret_name = v.db_name != null ? "${local.cluster_name}-${var.namespace}-${v.db_name}-db-readonly-secret" : "" - db_user = v.db_name != null ? module.rds[0].db_user["${var.namespace}-${v.db_name}"] : "" - custom_host_url = v.ingress_list != null ? (length(v.ingress_list) != 0 ? v.ingress_list : []) : [] - default_host_url = v.enable_default_ingress != null ? (v.enable_default_ingress ? kubernetes_ingress_v1.default_service_ingress["${k}-${var.namespace}-${local.default_domain_list[k].ingress[0]}"].spec[0].rule[0].host : "") : "" - basic_auth_user_name = (v.enable_basic_auth != null ? v.enable_basic_auth : false) ? "${k}-${random_string.basic_auth_user_name_suffix[k].result}" : "" - basic_auth_password = (v.enable_basic_auth != null ? v.enable_basic_auth : false) ? "${k}-${random_password.basic_auth_password[k].result}" : "" + db_name = v.db_name != null ? v.db_name : "" + db_secret_name = v.db_name != null ? "${local.cluster_name}-${var.namespace}-${v.db_name}-db-user-secret" : "" + db_read_only_secret_name = v.db_name != null ? "${local.cluster_name}-${var.namespace}-${v.db_name}-db-readonly-secret" : "" + db_user = v.db_name != null ? module.rds[0].db_user["${var.namespace}-${v.db_name}"] : "" + custom_host_url = v.ingress_list != null ? (length(v.ingress_list) != 0 ? v.ingress_list : []) : [] + default_host_url = v.enable_default_ingress != null ? ( v.enable_default_ingress ? kubernetes_ingress_v1.default_service_ingress["${k}-${var.namespace}-${local.default_domain_list[k].ingress[0]}"].spec[0].rule[0].host : "") : "" + basic_auth_user_name = (v.enable_basic_auth != null ? v.enable_basic_auth : false) ? "${k}-${random_string.basic_auth_user_name_suffix[k].result}" : "" + basic_auth_password = (v.enable_basic_auth != null ? v.enable_basic_auth : false) ? "${k}-${random_password.basic_auth_password[k].result}" : "" } } sensitive = true @@ -45,10 +45,10 @@ output "cron_jobs_configs" { value = { for k, v in var.services : k => { - db_name = v.db_name != null ? v.db_name : "" - db_secret_name = v.db_name != null ? "${local.cluster_name}-${var.namespace}-${v.db_name}-db-user-secret" : "" - db_read_only_secret_name = v.db_name != null ? 
"${local.cluster_name}-${var.namespace}-${v.db_name}-db-readonly-secret" : "" - db_user = v.db_name != null ? module.rds[0].db_user["${var.namespace}-${v.db_name}"] : "" + db_name = v.db_name != null ? v.db_name : "" + db_secret_name = v.db_name != null ? "${local.cluster_name}-${var.namespace}-${v.db_name}-db-user-secret" : "" + db_read_only_secret_name = v.db_name != null ? "${local.cluster_name}-${var.namespace}-${v.db_name}-db-readonly-secret" : "" + db_user = v.db_name != null ? module.rds[0].db_user["${var.namespace}-${v.db_name}"] : "" } } sensitive = true @@ -57,7 +57,7 @@ output "cron_jobs_configs" { output "dynamo_db_table_name" { value = tomap( { - for k, v in module.dynamodb_table : "${var.namespace}-${k}" => v.dynamodb_table_id + for k, v in module.dynamodb_table : "${var.namespace}-${k}" => v.dynamodb_table_id } ) } @@ -65,7 +65,7 @@ output "dynamo_db_table_name" { output "dynamo_db_table_arn" { value = tomap( { - for k, v in module.dynamodb_table : "${var.namespace}-${k}" => v.dynamodb_table_arn + for k, v in module.dynamodb_table : "${var.namespace}-${k}" => v.dynamodb_table_arn } ) } @@ -74,7 +74,7 @@ output "dynamo_db_table_arn" { output "dynamo_user_access_key" { value = tomap( { - for k, v in aws_iam_access_key.dynamo_keys : (var.namespace) => try(jsondecode(aws_secretsmanager_secret_version.dynamo_db_secrets[0].secret_string).access_key, "null") + for k, v in aws_iam_access_key.dynamo_keys : (var.namespace) => try(jsondecode(aws_secretsmanager_secret_version.dynamo_db_secrets[0].secret_string).access_key, "null") } ) sensitive = true @@ -83,7 +83,7 @@ output "dynamo_user_access_key" { output "dynamo_user_secret_key" { value = tomap( { - for k, v in aws_iam_access_key.dynamo_keys : (var.namespace) => "${local.cluster_name}-${var.namespace}-dynamo-user-secret-key" + for k, v in aws_iam_access_key.dynamo_keys : (var.namespace) => "${local.cluster_name}-${var.namespace}-dynamo-user-secret-key" } ) sensitive = true @@ -91,7 +91,7 @@ output "dynamo_user_secret_key" { output "custom_secrets_name_list" { value = tomap({ - for k, v in var.custom_namespace_secrets : v => "${local.cluster_name}-${var.namespace}-${v}-secret" + for k, v in var.custom_namespace_secrets : v=> "${local.cluster_name}-${var.namespace}-${v}-secret" }) } diff --git a/k8s/aws/namespace/secrets.tf b/k8s/aws/namespace/secrets.tf index c7ec2da8..6137c41e 100644 --- a/k8s/aws/namespace/secrets.tf +++ b/k8s/aws/namespace/secrets.tf @@ -1,4 +1,12 @@ locals { + custom_secrets = merge([ + for k in keys(var.services) : tomap({ + for secret in var.services[k].custom_secrets : "${k}-${secret}" => { + secret_name = secret + service = k + } + }) if var.services[k].custom_secrets != null + ]...) } # Service account with access to fetch the AWS secrets in all environment namespace @@ -14,20 +22,20 @@ resource "kubernetes_service_account" "secrets" { # Adds the secrets provider for the secrets initialized for the namespace resource "kubectl_manifest" "secrets_provider" { - for_each = { for k, v in var.services : k => v } + for_each = { for k,v in var.services : k => v } yaml_body = templatefile("${path.module}/templates/secret-provider-class.yaml", { secrets = jsonencode(concat( - (each.value.db_name != null ? [{ key = "DB_PASSWORD", value = "${local.cluster_name}-${var.namespace}-${each.value.db_name}-db-secret" }] : []), - (each.value.datastore_configs != null ? 
[{ key = "DB_PASSWORD", value = "${local.cluster_name}-${var.namespace}-${each.value.datastore_configs.databse}-db-secret" }] : []), - # var.cassandra_db == null ? [] : ["${local.cluster_name}-${var.namespace}-cassandra-secret"], - try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret" }], []), - try([for ns in var.custom_namespace_secrets : { key = ns, value = "${local.cluster_name}-${var.namespace}-${ns}-secret" }], []), - length(var.dynamo_db) == 0 ? [] : ["${local.cluster_name}-${var.namespace}-dynamo-user-secret-key"], - # can(var.kafka[var.namespace].topics)== true ? (length(var.kafka[var.namespace].topics) > 0 ? ["${local.cluster_name}-msk-secret"] : []) : [] + (each.value.db_name != null ? [{ key = "DB_PASSWORD" , value = "${local.cluster_name}-${var.namespace}-${each.value.db_name}-db-secret" }] : []), + (each.value.datastore_configs != null ? [{ key = "DB_PASSWORD" , value = "${local.cluster_name}-${var.namespace}-${each.value.datastore_configs.databse}-db-secret" }] : []), +# var.cassandra_db == null ? [] : ["${local.cluster_name}-${var.namespace}-cassandra-secret"], + try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret"}], []), + try([for ns in var.custom_namespace_secrets : { key = ns , value = "${local.cluster_name}-${var.namespace}-${ns}-secret"}], []), + length(var.dynamo_db) == 0 ? [] : ["${local.cluster_name}-${var.namespace}-dynamo-user-secret-key"], +# can(var.kafka[var.namespace].topics)== true ? (length(var.kafka[var.namespace].topics) > 0 ? ["${local.cluster_name}-msk-secret"] : []) : [] )) - namespace = kubernetes_namespace.app_environments.metadata[0].name + namespace = kubernetes_namespace.app_environments.metadata[0].name service_name = each.key } ) @@ -35,39 +43,39 @@ resource "kubectl_manifest" "secrets_provider" { # Adds the secrets provider for the secrets initialized for the namespace resource "kubectl_manifest" "secrets_provider_cron_jobs" { - for_each = { for k, v in var.cron_jobs : k => v } + for_each = { for k,v in var.cron_jobs : k => v } yaml_body = templatefile("${path.module}/templates/secret-provider-class.yaml", { secrets = jsonencode(concat( - (each.value.db_name != null ? [{ key = "DB_PASSWORD", value = "${local.cluster_name}-${var.namespace}-${each.value.db_name}-db-secret" }] : []), - (each.value.datastore_configs != null ? [{ key = "DB_PASSWORD", value = "${local.cluster_name}-${var.namespace}-${each.value.datastore_configs.databse}-db-secret" }] : []), + (each.value.db_name != null ? [{ key = "DB_PASSWORD" , value = "${local.cluster_name}-${var.namespace}-${each.value.db_name}-db-secret" }] : []), + (each.value.datastore_configs != null ? [{ key = "DB_PASSWORD" , value = "${local.cluster_name}-${var.namespace}-${each.value.datastore_configs.databse}-db-secret" }] : []), # var.cassandra_db == null ? [] : ["${local.cluster_name}-${var.namespace}-cassandra-secret"], - try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret" }], []), - try([for ns in var.custom_namespace_secrets : { key = ns, value = "${local.cluster_name}-${var.namespace}-${ns}-secret" }], []), - length(var.dynamo_db) == 0 ? 
[] : ["${local.cluster_name}-${var.namespace}-dynamo-user-secret-key"], + try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret"}], []), + try([for ns in var.custom_namespace_secrets : { key = ns , value = "${local.cluster_name}-${var.namespace}-${ns}-secret"}], []), + length(var.dynamo_db) == 0 ? [] : ["${local.cluster_name}-${var.namespace}-dynamo-user-secret-key"], # can(var.kafka[var.namespace].topics)== true ? (length(var.kafka[var.namespace].topics) > 0 ? ["${local.cluster_name}-msk-secret"] : []) : [] )) - namespace = kubernetes_namespace.app_environments.metadata[0].name + namespace = kubernetes_namespace.app_environments.metadata[0].name service_name = each.key } ) } resource "random_password" "cassandra_password" { - count = var.cassandra_db == null ? 0 : 1 - length = 16 - special = false + count = var.cassandra_db == null ? 0 : 1 + length = 16 + special = false } resource "aws_secretsmanager_secret" "cassandra_secret" { - count = var.cassandra_db == null ? 0 : 1 - name = "${local.cluster_name}-${var.namespace}-cassandra-secret" - tags = local.common_tags + count = var.cassandra_db == null ? 0 : 1 + name = "${local.cluster_name}-${var.namespace}-cassandra-secret" + tags = local.common_tags } resource "aws_secretsmanager_secret_version" "cassandra_secret" { - count = var.cassandra_db == null ? 0 : 1 + count = var.cassandra_db == null ? 0 : 1 secret_id = aws_secretsmanager_secret.cassandra_secret[0].id secret_string = random_password.cassandra_password[0].result } diff --git a/k8s/aws/namespace/vars.tf b/k8s/aws/namespace/vars.tf index 9f57bd59..4a63f77b 100644 --- a/k8s/aws/namespace/vars.tf +++ b/k8s/aws/namespace/vars.tf @@ -8,6 +8,12 @@ variable "app_name" { type = string } +variable "public_ingress" { + description = "Whether ingress is public or not." + type = string + default = false +} + variable "app_env" { description = "Application deployment environment." 
type = string @@ -22,56 +28,56 @@ variable "namespace" { variable "cron_jobs" { description = "Map of cron jobs to be executed within the namespace" - type = map(object({ - repo_name = optional(string) - ecr_repo = optional(string) - region = optional(string) - account_id = optional(string) - db_name = optional(string) - redis = optional(bool) - local_redis = optional(bool) - custom_secrets = optional(list(string)) + type = map(object({ + repo_name = optional(string) + ecr_repo = optional(string) + region = optional(string) + account_id = optional(string) + db_name = optional(string) + redis = optional(bool) + local_redis = optional(bool) + custom_secrets = optional(list(string)) enable_default_ingress = optional(bool) enable_basic_auth = optional(bool) - service_deployer = string - ingress_list = optional(list(string)) - badger_db = optional(bool) + service_deployer = string + ingress_list = optional(list(string)) + badger_db = optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) databse = optional(string) - type = optional(string) + type = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) schedule = string suspend = optional(bool) image = optional(string) concurrency_policy = optional(string) - http_port = optional(string) - metrics_port = optional(string) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - env = optional(map(any)) - env_list = optional(list(object({ + http_port = optional(string) + metrics_port = optional(string) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + env = optional(map(any)) + env_list = optional(list(object({ name = string value = string }))) - command = optional(list(string)) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + command = optional(list(string)) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -82,89 +88,89 @@ variable "cron_jobs" { })) })) })) - default = {} + default = {} } variable "services" { description = "Map of services to be deployed within the namespace" - type = map(object({ - repo_name = optional(string) - ecr_repo = optional(string) - region = optional(string) - account_id = optional(string) - db_name = optional(string) - redis = optional(bool) - local_redis = optional(bool) - custom_secrets = optional(list(string)) + type = map(object({ + repo_name = optional(string) + ecr_repo = optional(string) + region = optional(string) + account_id = optional(string) + db_name = optional(string) + redis = optional(bool) + local_redis = optional(bool) + custom_secrets = optional(list(string)) enable_default_ingress = optional(bool) enable_basic_auth = optional(bool) - service_deployer = string - ingress_list = optional(list(string)) - badger_db = optional(bool) + service_deployer = string + ingress_list = optional(list(string)) + badger_db = 
optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) databse = optional(string) - type = optional(string) + type = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) - replica_count = optional(number) + replica_count = optional(number) image = optional(string) - cli_service = optional(bool) - http_port = optional(string) - metrics_port = optional(string) - ports = optional(map(any)) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - min_available = optional(number) - heartbeat_url = optional(string) - env = optional(map(any)) - env_list = optional(list(object({ + cli_service = optional(bool) + http_port = optional(string) + metrics_port = optional(string) + ports = optional(map(any)) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + min_available = optional(number) + heartbeat_url = optional(string) + env = optional(map(any)) + env_list = optional(list(object({ name = string value = string }))) - command = optional(list(string)) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - hpa = optional(object({ - enable = optional(bool) - min_replicas = optional(number) - max_replicas = optional(number) - cpu_limit = optional(string) - memory_limit = optional(string) + command = optional(list(string)) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + hpa = optional(object({ + enable = optional(bool) + min_replicas = optional(number) + max_replicas = optional(number) + cpu_limit = optional(string) + memory_limit = optional(string) })) - readiness_probes = optional(object({ + readiness_probes = optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - liveness_probes = optional(object({ + liveness_probes = optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - pvc = optional(map(object({ + pvc = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -179,18 +185,18 @@ variable "services" { })) })) })) - default = {} + default = {} } variable "user_access" { description = "List of users who will have access to clusters" type = object({ - admins = optional(list(string)) + admins = optional(list(string)) viewers = optional(list(string)) editors = optional(list(string)) }) default = { - admins = [] + admins = [] viewers = [] editors = [] } @@ -214,7 +220,7 @@ variable "accessibility" { variable "sql_db" { description = "Map for rds inputs" - type = object({ + type = object({ enable = optional(bool) admin_user = optional(string) node_type = optional(string) @@ -232,12 +238,12 @@ variable 
"sql_db" { engine_version = optional(string) enable_ssl = optional(bool) }) - default = null + default = null } variable "local_redis" { description = "Inputs to provision Redis instance within the cluster as a statefulset." - type = object( + type = object( { enable = bool disk_size = optional(string) @@ -253,15 +259,15 @@ variable "local_redis" { variable "cassandra_db" { description = "Map for cassandra inputs" - type = object( - { - admin_user = string - replica_count = number - persistence_size = number + type = object( + { + admin_user = string + replica_count = number + persistence_size = number - } - ) - default = null + } + ) + default = null } variable "common_tags" { @@ -278,16 +284,16 @@ variable "ext_rds_sg_cidr_block" { variable "dynamo_db" { description = "Map for dynaomo_db inputs" - type = map(object({ - hash_key = string - range_key = string - hash_key_type = string - range_key_type = string - billing_mode = string - read_capacity = number - write_capacity = number - ttl_enabled = bool - ttl_attribute_name = string + type = map(object({ + hash_key = string + range_key = string + hash_key_type = string + range_key_type = string + billing_mode = string + read_capacity = number + write_capacity = number + ttl_enabled = bool + ttl_attribute_name = string global_secondary_index = optional(list(object({ name = string hash_key = string @@ -298,7 +304,7 @@ variable "dynamo_db" { non_key_attributes = optional(list(string)) }))) })) - default = {} + default = {} } variable "custom_namespace_secrets" { @@ -307,12 +313,38 @@ variable "custom_namespace_secrets" { default = [] } +variable "ingress_custom_domain" { + description = "Map for k8 ingress for custom domain." + type = map(any) + default = {} + # below is example value + # ingress_custom_domain = { + # acme = [{ ---> namespace + # service = "acme-challenge" ---> service name + # domain = "*.test1.shgw.link" ---> custom domain name + # name = "shgw.link" ---> this should be unique name + # }] + # } +} + variable "rds_local_access" { description = "whether RDS needs to be allowed to access from local" type = bool default = false } +variable "kafka" { + description = "Map for kafka input" + type = map( + object( + { + topics = list(string) + } + ) + ) + default = {} +} + variable "provider_id" { description = "profile name" type = string @@ -343,29 +375,35 @@ variable "helm_charts" { default = {} } +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" +} + variable "shared_services" { type = object({ - type = string - bucket = string - profile = optional(string) - location = optional(string) - resource_group = optional(string) + type = string + bucket = string + profile = optional(string) + location = optional(string) + resource_group = optional(string) storage_account = optional(string) - container = optional(string) - cluster_prefix = optional(string) + container = optional(string) + cluster_prefix = optional(string) }) } variable "standard_tags" { description = "standard tags for resources" - type = object({ + type = object ({ project = optional(string) provisioner = optional(string) }) default = null } -variable "cert_issuer_config" { +variable "cert_issuer_config"{ description = "email to be added as cert-manager issuer" type = object({ env = optional(string) diff --git a/k8s/aws/nginx/vars.tf b/k8s/aws/nginx/vars.tf index ccc022d5..91d7b90d 100644 --- a/k8s/aws/nginx/vars.tf +++ b/k8s/aws/nginx/vars.tf @@ -3,3 +3,25 @@ variable "app_name" { type = string } 
+variable "app_env" { + description = "This is Environment where the NLB is deployed." + type = string +} + +variable "common_tags" { + description = "A map of common tags for the resources" + type = map(string) + default = {} +} + +variable "inbound_ip" { + description = "list of ip range that are allowed access to services on EKS cluster" + type = list + default = ["10.0.0.0/8"] +} + +variable "public_app" { + description = "whether application deploy on public ALB" + type = bool + default = false +} \ No newline at end of file diff --git a/k8s/azure/aad/main.tf b/k8s/azure/aad/main.tf index aa3a7596..d0d38482 100644 --- a/k8s/azure/aad/main.tf +++ b/k8s/azure/aad/main.tf @@ -1,25 +1,25 @@ data "azuread_domains" "aad_domains" {} resource "random_string" "aad_users_suffix" { - length = 6 - numeric = true - lower = true - upper = false - special = false -} + length = 6 + numeric = true + lower = true + upper = false + special = false + } resource "random_password" "add_user_password" { - for_each = { for k, v in var.users : v => k } - length = 12 - min_lower = 1 - min_upper = 1 + for_each = {for k, v in var.users : v => k} + length = 12 + min_lower = 1 + min_upper = 1 min_numeric = 1 min_special = 1 } resource "azuread_user" "aad_users" { - for_each = { for k, v in var.users : v => k } - user_principal_name = "${replace(each.key, "@", "_")}_${random_string.aad_users_suffix.result}@${data.azuread_domains.aad_domains.domains[*].domain_name[0]}" + for_each = {for k, v in var.users : v => k} + user_principal_name = "${replace(each.key, "@", "_")}_${random_string.aad_users_suffix.result}@${data.azuread_domains.aad_domains.domains.*.domain_name[0]}" display_name = replace(split("@", each.key)[0], ".", "_") password = random_password.add_user_password[each.key].result } \ No newline at end of file diff --git a/k8s/azure/aks/fluentbit.tf b/k8s/azure/aks/fluentbit.tf index 55750656..671b5bec 100644 --- a/k8s/azure/aks/fluentbit.tf +++ b/k8s/azure/aks/fluentbit.tf @@ -1,31 +1,31 @@ locals { - fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? var.fluent_bit.enable : false) : false - fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? var.fluent_bit.loki : []) : [] - fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] - fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []) : [] - fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []) : [] - fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []) : [] - fluent_bit_slack = local.fluent_bit_enable ? (var.fluent_bit.slack != null ? var.fluent_bit.slack : []) : [] + fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? var.fluent_bit.enable : false): false + fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? var.fluent_bit.loki : []) : [] + fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] + fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []): [] + fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []): [] + fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []): [] + fluent_bit_slack = local.fluent_bit_enable ? 
(var.fluent_bit.slack != null ? var.fluent_bit.slack : []): [] fluent_bit_loki_outputs = concat([ - for k, v in local.fluent_bit_loki : { + for k,v in local.fluent_bit_loki : { host = v.host tenant_id = v.tenant_id != null ? v.tenant_id : "" labels = v.labels port = v.port != null ? v.port : 3100 - tls = v.tls != null ? v.tls : "On" + tls = v.tls != null ? v.tls : "On" } if length(local.fluent_bit_loki) > 0 - ], local.enable_loki ? [{ - host = "loki-distributor.loki" - tenant_id = random_uuid.grafana_standard_datasource_header_value.result - labels = "namespace=$kubernetes['namespace_name'],pod=$kubernetes['pod_name'],service=$kubernetes['container_name'],cluster=${local.cluster_name}" - port = 3100 - tls = "Off" - }] : []) + ], local.enable_loki ? [{ + host = "loki-distributor.loki" + tenant_id = random_uuid.grafana_standard_datasource_header_value.result + labels = "namespace=$kubernetes['namespace_name'],pod=$kubernetes['pod_name'],service=$kubernetes['container_name'],cluster=${local.cluster_name}" + port = 3100 + tls = "Off" + }]: []) fluent_bit_http_outputs = [ - for k, v in local.fluent_bit_http : { + for k,v in local.fluent_bit_http : { host = v.host port = v.port != null ? v.port : 80 uri = v.uri != null ? v.uri : "/" @@ -36,7 +36,7 @@ locals { ] fluent_bit_splunk_outputs = [ - for k, v in local.fluent_bit_splunk : { + for k,v in local.fluent_bit_splunk : { host = v.host token = v.token port = v.port != null ? v.port : 8088 @@ -46,36 +46,36 @@ locals { ] fluent_bit_datadog_outputs = [ - for k, v in local.fluent_bit_datadog : { - host = v.host - api_key = v.api_key - tls = v.tls != null ? v.tls : "On" - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_datadog : { + host = v.host + api_key = v.api_key + tls = v.tls != null ? v.tls : "On" + compress = v.compress != null ? v.compress : "gzip" } if length(local.fluent_bit_datadog) > 0 ] fluent_bit_newrelic_outputs = [ - for k, v in local.fluent_bit_newrelic : { - host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" - api_key = v.api_key - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_newrelic : { + host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" + api_key = v.api_key + compress = v.compress != null ? v.compress : "gzip" } if length(local.fluent_bit_newrelic) > 0 ] fluent_bit_slack_outputs = [ - for k, v in local.fluent_bit_slack : { - webhook = v.webhook + for k,v in local.fluent_bit_slack : { + webhook = v.webhook } if length(local.fluent_bit_slack) > 0 ] } -data template_file "fluent-bit" { +data template_file "fluent-bit"{ count = local.fluent_bit_enable ? 
1 : 0 template = file("./templates/fluent-bit-values.yaml") - vars = { - "CLUSTER_NAME" = local.cluster_name - "TAGS" = join(",", [for key, value in local.common_tags : "${key}=${value}"]) + vars = { + "CLUSTER_NAME" = local.cluster_name + "TAGS" = join(",", [for key, value in local.common_tags : "${key}=${value}"]) "HTTP_SERVER" = "On" "HTTP_PORT" = "2020" @@ -83,22 +83,22 @@ data template_file "fluent-bit" { "READ_FROM_HEAD" = "Off" "READ_FROM_TAIL" = "On" - fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) - fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) - fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) - fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) + fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) + fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) + fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) + fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) fluent_bit_newrelic_outputs = jsonencode(local.fluent_bit_newrelic_outputs) - fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) + fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) } } resource "helm_release" "fluentbit-config" { - count = local.fluent_bit_enable ? 1 : 0 + count = local.fluent_bit_enable ? 1 : 0 repository = "https://fluent.github.io/helm-charts" chart = "fluent-bit" name = "fluent-bit" version = "0.35.0" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name values = [ data.template_file.fluent-bit[0].rendered diff --git a/k8s/azure/aks/grafana-dashboard.tf b/k8s/azure/aks/grafana-dashboard.tf index d4224b43..9f23d7ef 100644 --- a/k8s/azure/aks/grafana-dashboard.tf +++ b/k8s/azure/aks/grafana-dashboard.tf @@ -3,13 +3,13 @@ locals { folder_creation = false grafana_dashboard_folder = local.folder_creation ? { - Kong = ["kong-official"] - Partner_Standard_API = ["partner-standard-api"] - Disk_Utilization = ["cortex-disk-utilization", "prometheus-disk-utilization"] + Kong = ["kong-official"] + Partner_Standard_API = ["partner-standard-api"] + Disk_Utilization = ["cortex-disk-utilization", "prometheus-disk-utilization"] } : {} folder_map = [ - for key, value in local.grafana_dashboard_folder : { + for key, value in local.grafana_dashboard_folder : { folder = key dashboards = value } @@ -17,7 +17,7 @@ locals { dashboard_map = merge([ for key, value in local.folder_map : { - for dashboard in value.dashboards : "${value.folder}-${dashboard}" => { + for dashboard in value.dashboards : "${value.folder}-${dashboard}" => { folder = value.folder dashboard = dashboard } @@ -25,13 +25,13 @@ locals { ]...) 
role_map = { - grafana_admins = "Admin" + grafana_admins = "Admin" grafana_viewers = "Editor" grafana_editors = "Viewer" } users_with_roles = flatten([ - for role, emails in var.grafana_access : [ + for role, emails in var.grafana_access: [ for email in emails : { email = email role = local.role_map[role] @@ -39,11 +39,14 @@ locals { ] ]) + users_with_roles_map = { + for user in local.users_with_roles : user.email => user + } } resource "null_resource" "wait_for_grafana" { provisioner "local-exec" { - command = <<-EOT + command = <<-EOT #!/bin/bash DOMAIN_NAME="${var.accessibility.domain_name}" @@ -101,23 +104,23 @@ resource "null_resource" "wait_for_grafana" { } resource "random_password" "admin_passwords" { - for_each = coalesce(toset(var.grafana_access.grafana_admins), toset([])) - length = 12 - special = true + for_each = coalesce(toset(var.grafana_access.grafana_admins), toset([])) + length = 12 + special = true override_special = "$" } resource "random_password" "editor_passwords" { - for_each = coalesce(toset(var.grafana_access.grafana_editors), toset([])) - length = 12 - special = true + for_each = coalesce(toset(var.grafana_access.grafana_editors), toset([])) + length = 12 + special = true override_special = "$" } resource "random_password" "viewer_passwords" { - for_each = coalesce(toset(var.grafana_access.grafana_viewers), toset([])) - length = 12 - special = true + for_each = coalesce(toset(var.grafana_access.grafana_viewers), toset([])) + length = 12 + special = true override_special = "$" } @@ -129,7 +132,7 @@ resource "grafana_user" "admins" { password = random_password.admin_passwords[each.key].result is_admin = true - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_user" "editors" { @@ -140,7 +143,7 @@ resource "grafana_user" "editors" { password = random_password.editor_passwords[each.key].result is_admin = false - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_user" "viewers" { @@ -151,7 +154,7 @@ resource "grafana_user" "viewers" { password = random_password.viewer_passwords[each.key].result is_admin = false - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_folder" "dashboard_folder" { @@ -171,7 +174,7 @@ resource "grafana_api_key" "admin_token" { name = "terraform-admin-token" role = "Admin" - depends_on = [grafana_user.admins, grafana_user.editors, grafana_user.viewers] + depends_on = [ grafana_user.admins, grafana_user.editors, grafana_user.viewers ] } resource "null_resource" "update_user_roles" { @@ -180,7 +183,7 @@ resource "null_resource" "update_user_roles" { } provisioner "local-exec" { - command = < v } + for_each = {for k,v in local.grafana_datasource_list : k => v} metadata { - name = "grafana-${each.key}-datasource" + name = "grafana-${each.key}-datasource" namespace = helm_release.grafana[0].namespace labels = { grafana_datasource = "1" @@ -75,10 +76,10 @@ resource "kubernetes_config_map" "grafana_custom_datasource" { data = { "datasource.yaml" = templatefile("${path.module}/templates/grafana-custom-datasource.yaml", { - tempo_datasource = local.enable_tempo - loki_datasource = local.enable_loki - mimir_datasource = local.enable_mimir - datasource_name = each.key + tempo_datasource = local.enable_tempo + loki_datasource = local.enable_loki + mimir_datasource = local.enable_mimir + datasource_name = each.key datasource_header_value = each.value } ) @@ 
-93,7 +94,7 @@ resource "kubernetes_config_map" "grafana_standard_datasource" { metadata { name = "grafana-standard-datasource" namespace = helm_release.grafana[0].namespace - labels = { + labels = { grafana_datasource = "1" } } @@ -101,14 +102,14 @@ resource "kubernetes_config_map" "grafana_standard_datasource" { data = { "datasource.yaml" = templatefile("./templates/grafana-standard-datasource.yaml", { - datasource_name = local.cluster_name + datasource_name = local.cluster_name datasource_header_value = random_uuid.grafana_standard_datasource_header_value.result - mimir_create = local.enable_mimir - loki_create = local.enable_loki - tempo_create = local.enable_tempo - cortex_create = local.enable_cortex - prometheus_create = local.prometheus_enable - }) + mimir_create = local.enable_mimir + loki_create = local.enable_loki + tempo_create = local.enable_tempo + cortex_create = local.enable_cortex + prometheus_create = local.prometheus_enable + }) } } @@ -117,42 +118,42 @@ resource "kubernetes_config_map" "grafana_service_dashboard" { metadata { name = "grafana-service-dashboard" namespace = helm_release.grafana[0].namespace - labels = { + labels = { grafana_dashboard = "1" } } data = { - "kong.json" = file("./templates/kong-official.json") - "cronjob.json" = file("./templates/cronjob.json") - "partner-standard-api.json" = file("./templates/partner-standard-api.json") - "cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") - "prometheus-disk-utilization.json" = file("./templates/prometheus-disk-utilization.json") + "kong.json" = file("./templates/kong-official.json") + "cronjob.json" = file("./templates/cronjob.json") + "partner-standard-api.json" = file("./templates/partner-standard-api.json") + "cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") + "prometheus-disk-utilization.json" = file("./templates/prometheus-disk-utilization.json") } } module "grafana_db" { - source = "../../../sql/azure-postgres" - count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 1 : 0, 0) - - resource_group_name = var.resource_group_name - location = var.app_region - cluster_name = local.cluster_name - namespace = "monitoring" - administrator_login = "postgresadmin" - databases = ["grafana"] - postgres_server_name = "${local.cluster_name}-monitoring-db" - read_replica = false - sku_name = "GP_Standard_D2s_v3" - storage_mb = 32768 - key_vault_id = azurerm_key_vault.secrets.id - - tags = local.common_tags + source = "../../../sql/azure-postgres" + count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 1 : 0, 0) + + resource_group_name = var.resource_group_name + location = var.app_region + cluster_name = local.cluster_name + namespace = "monitoring" + administrator_login = "postgresadmin" + databases = ["grafana"] + postgres_server_name = "${local.cluster_name}-monitoring-db" + read_replica = false + sku_name = "GP_Standard_D2s_v3" + storage_mb = 32768 + key_vault_id = azurerm_key_vault.secrets.id + + tags = local.common_tags } resource "kubernetes_service" "observability_db_service" { - count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 1 : 0, 0) + count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 
1 : 0, 0) metadata { name = "monitoring-rds" namespace = "db" @@ -167,19 +168,19 @@ resource "kubernetes_service" "observability_db_service" { } data "azurerm_key_vault" "grafana_oauth" { - count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 name = "grafana-oauth" resource_group_name = var.resource_group_name } data "azurerm_key_vault_secret" "oauth_client_id" { - count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 name = "${local.cluster_name}-oauth-client-id" key_vault_id = data.azurerm_key_vault.grafana_oauth[0].id } data "azurerm_key_vault_secret" "oauth_client_secret" { - count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 name = "${local.cluster_name}-oauth-client-secret" key_vault_id = data.azurerm_key_vault.grafana_oauth[0].id } \ No newline at end of file diff --git a/k8s/azure/aks/observability.tf b/k8s/azure/aks/observability.tf index 43715bd5..bbb35749 100644 --- a/k8s/azure/aks/observability.tf +++ b/k8s/azure/aks/observability.tf @@ -3,7 +3,7 @@ locals { enable_tempo = try(var.observability_config.tempo != null ? var.observability_config.tempo.enable : false, false) enable_cortex = try(var.observability_config.cortex != null ? var.observability_config.cortex.enable : false, false) enable_mimir = try(var.observability_config.mimir != null ? var.observability_config.mimir.enable : false,false) - storage_account = "${replace(try(local.cluster_name != null ? 
local.cluster_name : "default-cluster","default-cluster"),"-","")}${random_string.storage_account_suffix.result}" + storage_account = "${replace(local.cluster_name,"-","")}${random_string.storage_account_suffix.result}" } resource "random_string" "storage_account_suffix" { diff --git a/k8s/azure/aks/outputs.tf b/k8s/azure/aks/outputs.tf index 9f9facb8..9419005b 100644 --- a/k8s/azure/aks/outputs.tf +++ b/k8s/azure/aks/outputs.tf @@ -42,15 +42,15 @@ output "node_configs" { } output "k8s_ca" { - value = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate + value = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate sensitive = true } output "custom_secrets_name_list" { - value = { - for k, v in var.custom_secrets_name_list : k => { - for secret_key in v.secrets : secret_key => - "${local.cluster_name}-${k}-${secret_key}-secret" } + value ={ + for k, v in var.custom_secrets_name_list : k => { + for secret_key in v.secrets : "${secret_key}" => + "${local.cluster_name}-${k}-${secret_key}-secret" } } } @@ -72,7 +72,7 @@ output "lbip" { output "grafana_password" { sensitive = true - value = try(random_password.observability_admin[0].result, "") + value = try(random_password.observability_admin[0].result,"") } output "grafana_admin" { @@ -80,16 +80,16 @@ output "grafana_admin" { } output "grafana_host" { - value = try(local.grafana_host, "") + value = try(local.grafana_host,"") } output "grafana_datasources" { - value = local.grafana_datasource_list + value = local.grafana_datasource_list sensitive = true } output "mimir_host_url" { - value = try(module.observability[0].mimir_host_url, "") + value = try(module.observability[0].mimir_host_url,"") } output "mimir_basic_auth_username" { @@ -105,7 +105,7 @@ output "mimir_basic_auth_password" { } output "loki_host_url" { - value = try(module.observability[0].loki_host_url, "") + value = try(module.observability[0].loki_host_url,"") } output "cluster_uid" { @@ -113,11 +113,11 @@ output "cluster_uid" { } output "tempo_host_url" { - value = try(module.observability[0].tempo_host_url, "") + value =try( module.observability[0].tempo_host_url,"") } output "cortex_host_url" { - value = try(module.observability[0].cortex_host_url, "") + value = try(module.observability[0].cortex_host_url,"") } output "grafana_user_credentials" { @@ -125,15 +125,15 @@ output "grafana_user_credentials" { { for key, pwd in random_password.admin_passwords : key => { email = key password = pwd.result - } }, + }}, { for key, pwd in random_password.editor_passwords : key => { email = key password = pwd.result - } }, + }}, { for key, pwd in random_password.viewer_passwords : key => { email = key password = pwd.result - } } + }} ) sensitive = true } diff --git a/k8s/azure/aks/prometheus.tf b/k8s/azure/aks/prometheus.tf index 88198d0d..d8e5b871 100644 --- a/k8s/azure/aks/prometheus.tf +++ b/k8s/azure/aks/prometheus.tf @@ -19,26 +19,27 @@ resource "kubernetes_secret" "prometheus_remote_write_auth" { type = "Opaque" } -locals { +locals{ ### this app namespace level alerts: - namespace_teams_webhook = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "teams" } if s.alert_webhooks != null]...) 
- namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = v.data, labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "google_chat" } if s.alert_webhooks != null]...) + namespace_teams_webhook = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "teams"} if s.alert_webhooks != null]...) + namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => {data = v.data, labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "google_chat"}if s.alert_webhooks != null]...) ### this is cluster level alerts: - cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data, 8, length(val.data)), labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "teams" } - cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "moogsoft" } - cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "pagerduty" } - cluster_google_chat_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "google_chat" } - cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => { url = val.url, channel = val.channel, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } - cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) - cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) - cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => { url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } - google_chat_alerts = merge(local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) + cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data,8 ,length(val.data) ),labels = val.labels == null ? 
{severity = "critical", servicealert = "true"} : val.labels, } if val.type == "teams"} + cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "moogsoft"} + cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "pagerduty"} + cluster_google_chat_alerts= jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => {data = val.data, labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "google_chat"} + cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => {url = val.url, channel = val.channel,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, }} + cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) + cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) + cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => {url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, }} + google_chat_alerts = merge(local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) ### This is list of grafana datasources - prometheus_enable = try(var.observability_config.prometheus != null ? var.observability_config.prometheus.enable : true, true) + prometheus_enable = try(var.observability_config.prometheus != null ? var.observability_config.prometheus.enable : true, true) + grafana_version = try(var.observability_config.grafana.version != null ? var.observability_config.grafana.version : "7.0.8", "7.0.8") # Create secrets for user-provided remote write configs with basic auth prometheus_remote_write_secrets = try(var.observability_config.prometheus.remote_write, null) != null ? { @@ -58,9 +59,9 @@ locals { ] : [] default_remote_write_config = local.enable_mimir && local.prometheus_enable ? [{ - host = "http://mimir-distributor.mimir:8080/api/v1/push" - key = "X-Scope-OrgID" - value = random_uuid.grafana_standard_datasource_header_value.result + host = "http://mimir-distributor.mimir:8080/api/v1/push" + key = "X-Scope-OrgID" + value = random_uuid.grafana_standard_datasource_header_value.result secret_name = null }] : [] @@ -72,32 +73,32 @@ data "template_file" "prom_template" { count = local.prometheus_enable ? 1 : 0 template = file("./templates/prometheus-values.yaml") - vars = { - PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") - PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? 
var.observability_config.prometheus.persistence.retention_size : "45GB", "45GB") - PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "10d", "10d") - CLUSTER_NAME = var.app_name - DOMAIN_NAME = var.accessibility.domain_name - REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) - ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false - MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true - MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? false : true - MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) - MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key - MOOGSOFT_USERNAME = var.moogsoft_username - teams_webhook_alerts = jsonencode(local.cluster_alerts) - cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) - cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) - GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true - GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) - PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true - PAGER_DUTY_KEY = var.pagerduty_integration_key - PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) - GRAFANA_HOST = local.grafana_host - SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true - WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true - SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) - WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) + vars = { + PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") + PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? var.observability_config.prometheus.persistence.retention_size : "45GB", "45GB") + PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "10d", "10d") + CLUSTER_NAME = var.app_name + DOMAIN_NAME = var.accessibility.domain_name + REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) + ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false + MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true + MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? 
false : true + MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) + MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key + MOOGSOFT_USERNAME = var.moogsoft_username + teams_webhook_alerts = jsonencode(local.cluster_alerts) + cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) + cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) + GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true + GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) + PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true + PAGER_DUTY_KEY = var.pagerduty_integration_key + PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) + GRAFANA_HOST = local.grafana_host + SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true + WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true + SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) + WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) } } @@ -106,7 +107,7 @@ resource "helm_release" "prometheus" { chart = "kube-prometheus-stack" name = "prometheus" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name create_namespace = true version = try(var.observability_config.prometheus.version != null ? var.observability_config.prometheus.version : "60.0.0", "60.0.0") @@ -136,33 +137,33 @@ resource "helm_release" "alerts_teams" { data "template_file" "cluster-alerts" { template = file("./templates/cluster-level-alerts.yaml") - vars = { + vars = { cluster_memory_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_underutilisation != null ? var.cluster_alert_thresholds.memory_underutilisation : 20) - cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) - cluster_node_count_max_value = var.node_config.max_count - cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) - cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count : 80) - cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? var.cluster_alert_thresholds.cpu_utilisation : 80) - cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation : 20) - cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization : 80) - cluster_name = local.cluster_name - cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) - nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold : 5) - cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 
80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) - prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) + cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) + cluster_node_count_max_value = var.node_config.max_count + cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) + cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count: 80) + cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? var.cluster_alert_thresholds.cpu_utilisation: 80) + cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation: 20) + cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization: 80) + cluster_name = local.cluster_name + cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) + nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold: 5) + cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) + prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) } } resource "kubectl_manifest" "cluster-alerts" { count = local.grafana_enable ? 1 : 0 - yaml_body = data.template_file.cluster-alerts.rendered + yaml_body = data.template_file.cluster-alerts.rendered depends_on = [helm_release.prometheus] } resource "random_password" "observability_admin" { - count = local.grafana_enable ? 1 : 0 - length = 16 - special = false + count = local.grafana_enable ? 
1 : 0 + length = 16 + special = false } resource "azurerm_key_vault_secret" "observability_admin_secret" { diff --git a/k8s/azure/aks/vars.tf b/k8s/azure/aks/vars.tf index 3b82fa69..85c05392 100644 --- a/k8s/azure/aks/vars.tf +++ b/k8s/azure/aks/vars.tf @@ -17,7 +17,7 @@ variable "app_env" { } variable "app_region" { - type = string + type = string description = "Location where the resources to be created" default = "" } @@ -50,12 +50,12 @@ variable "kubernetes_version" { variable "user_access" { description = "List of users who will have access to clusters" type = object({ - app_admins = optional(list(string)) + app_admins = optional(list(string)) app_viewers = optional(list(string)) app_editors = optional(list(string)) }) default = { - app_admins = [] + app_admins = [] app_viewers = [] app_editors = [] } @@ -64,12 +64,12 @@ variable "user_access" { variable "grafana_access" { description = "List of users who will have access to grafana" type = object({ - grafana_admins = optional(list(string)) + grafana_admins = optional(list(string)) grafana_viewers = optional(list(string)) grafana_editors = optional(list(string)) }) default = { - grafana_admins = [] + grafana_admins = [] grafana_viewers = [] grafana_editors = [] } @@ -83,29 +83,29 @@ variable "enable_auto_scaling" { variable "node_config" { description = "List of values for the node configuration of kubernetes cluster" - type = object({ - node_type = string - min_count = number - max_count = number + type = object({ + node_type = string + min_count = number + max_count = number required_workload_type = optional(string) }) validation { - condition = (var.node_config.min_count > 0) + condition = (var.node_config.min_count > 0) error_message = "The variable kube_node_count_min must be greater than 0." } validation { - condition = (var.node_config.max_count < 30) + condition = (var.node_config.max_count < 30) error_message = "The variable kube_node_count_max value must less than 30." } } variable "app_namespaces" { description = "List of envs and respective users who will have access to edit non system resources in this cluster" - type = map(object({ - alert_webhooks = optional(list(object({ - type = string - data = string - labels = optional(map(string)) + type = map(object({ + alert_webhooks = optional(list(object({ + type = string + data = string + labels = optional(map(string)) }))) })) default = {} @@ -114,39 +114,39 @@ variable "app_namespaces" { variable "cluster_alert_thresholds" { description = "Cluster alerts threshold configuration." 
type = object({ - cpu_utilisation = optional(number) - cpu_underutilisation = optional(number) - node_count = optional(number) - memory_utilisation = optional(number) - memory_underutilisation = optional(number) - pod_count = optional(number) - nginx_5xx_percentage_threshold = optional(number) - disk_utilization = optional(number) - cortex_disk_utilization_threshold = optional(number) + cpu_utilisation = optional(number) + cpu_underutilisation = optional(number) + node_count = optional(number) + memory_utilisation = optional(number) + memory_underutilisation = optional(number) + pod_count = optional(number) + nginx_5xx_percentage_threshold = optional(number) + disk_utilization = optional(number) + cortex_disk_utilization_threshold = optional(number) prometheus_disk_utilization_threshold = optional(number) }) - default = { - cpu_utilisation = 80 - cpu_underutilisation = 20 - node_count = 80 - memory_utilisation = 80 - memory_underutilisation = 20 - pod_count = 80 - nginx_5xx_percentage_threshold = 5 - disk_utilization = 20 - cortex_disk_utilization_threshold = 80 + default = { + cpu_utilisation = 80 + cpu_underutilisation = 20 + node_count = 80 + memory_utilisation = 80 + memory_underutilisation = 20 + pod_count = 80 + nginx_5xx_percentage_threshold = 5 + disk_utilization = 20 + cortex_disk_utilization_threshold = 80 prometheus_disk_utilization_threshold = 80 } } variable "cluster_alert_webhooks" { description = "details for setting up of different types of alerts." - type = list(object({ - type = string - data = string + type = list(object({ + type = string + data = string labels = optional(map(string)) })) - default = [] + default = [] # example variable # cluster_alert_webhooks = [ @@ -176,7 +176,7 @@ variable "moogsoft_username" { variable "custom_secrets_name_list" { description = " list of aws secrets that were manually created by prefixing cluster name and environment " - type = map( + type = map( object( { secrets = list(string) @@ -188,25 +188,25 @@ variable "custom_secrets_name_list" { variable "pagerduty_integration_key" { description = "Pagerduty Integration key to send data to Pagerduty" - type = string - default = "" + type = string + default = "" } variable "observability_config" { description = "All the configuration related to observability(e.g prometheus, grafana, loki, tempo and cortex)" - type = object({ - suffix = optional(string) + type = object({ + suffix = optional(string) prometheus = optional(object({ - version = optional(string) - enable = bool - persistence = optional(object({ + version = optional(string) + enable = bool + persistence = optional(object({ disk_size = optional(string) retention_size = optional(string) retention_duration = optional(string) })) remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -215,14 +215,14 @@ variable "observability_config" { }))) })) grafana = optional(object({ - version = optional(string) - enable = bool - url = optional(string) - min_replica = optional(number) - max_replica = optional(number) - request_memory = optional(string) - request_cpu = optional(string) - dashboard = optional(object({ + version = optional(string) + enable = bool + url = optional(string) + min_replica = optional(number) + max_replica = optional(number) + request_memory = optional(string) + request_cpu = optional(string) + dashboard = optional(object({ limit_memory = optional(string) limit_cpu = 
optional(string) request_memory = optional(string) @@ -235,8 +235,8 @@ variable "observability_config" { request_cpu = optional(string) })) persistence = optional(object({ - type = optional(string) - disk_size = optional(string) + type = optional(string) + disk_size = optional(string) })) configs = optional(object({ datasource_list = optional(map(any)) @@ -245,22 +245,22 @@ variable "observability_config" { })) })) kubernetes_event_exporter = optional(object({ - enable = bool + enable = bool log_level = optional(string) max_event_age_second = optional(string) - loki_receivers = optional(list(object({ - name = string - url = string + loki_receivers = optional(list(object({ + name = string + url = string header = optional(object({ key = string value = string })) cluster_id = optional(string) }))) - webhook_receivers = optional(list(object({ - name = string - type = string - url = string + webhook_receivers = optional(list(object({ + name = string + type = string + url = string header = optional(object({ key = string value = string @@ -274,20 +274,20 @@ variable "observability_config" { })) })) loki = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -340,21 +340,21 @@ variable "observability_config" { })) })) cortex = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_series_per_metric = optional(number) - max_series_per_user = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_series_per_metric = optional(number) + max_series_per_user = optional(number) max_fetched_chunks_per_query = optional(number) })) query_range = optional(object({ memcached_client_timeout = optional(string) })) compactor = optional(object({ - enable = optional(bool) - replicas = optional(number) + enable = optional(bool) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -364,8 +364,8 @@ variable "observability_config" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) @@ -392,7 +392,7 @@ variable "observability_config" { })) query_frontend = optional(object({ replicas = optional(number) - enable = optional(bool) + enable = optional(bool) })) store_gateway = optional(object({ replication_factor = optional(number) @@ 
-448,14 +448,14 @@ variable "observability_config" { })) })) mimir = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ ingestion_rate = optional(number) @@ -465,7 +465,7 @@ variable "observability_config" { max_outstanding_requests_per_tenant = optional(number) })) compactor = optional(object({ - replicas = optional(number) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -475,22 +475,22 @@ variable "observability_config" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) query_frontend = optional(object({ replicas = optional(number) @@ -507,30 +507,30 @@ variable "observability_config" { max_memory = optional(string) })) distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) })) })) tempo = optional(object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - ingester_bytes_received = optional(number) - distributor_ingester_appends = optional(number) + ingester_bytes_received = optional(number) + distributor_ingester_appends = optional(number) distributor_ingester_append_failures = optional(number) - ingester_live_traces = optional(number) - distributor_spans_received = optional(number) - distributor_bytes_received = optional(number) - ingester_blocks_flushed = optional(number) - tempodb_blocklist = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + ingester_live_traces = optional(number) + distributor_spans_received = optional(number) + distributor_bytes_received = optional(number) + ingester_blocks_flushed = optional(number) + tempodb_blocklist = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) max_receiver_msg_size = optional(number) ingester = optional(object({ 
@@ -559,10 +559,10 @@ variable "observability_config" { })) querier = optional(object({ - replicas = optional(number) + replicas = optional(number) })) query_frontend = optional(object({ - replicas = optional(number) + replicas = optional(number) })) metrics_generator = optional(object({ enable = optional(bool) @@ -570,9 +570,9 @@ variable "observability_config" { service_graphs_max_items = optional(number) service_graphs_wait = optional(string) remote_write_flush_deadline = optional(string) - remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -585,36 +585,53 @@ variable "observability_config" { } +variable "domain_name_label" { + description = "Name of the domain label for Public IP(fqdn)" + type = string + default = "sample-domains" +} variable "publicip_sku" { description = "Public IP address SKU type" + type = string + default = "Standard" +} + +variable "monitoring_type" { + description = "Whether you want to use basic or rlog for monitoring (e.g basic or rlog)" type = string - default = "Standard" + default = "basic" +} + +variable "rlog_host" { + description = "Rlog Host endpoint to monitor the logs, metrics and traces (if monitoring_type is rlog, must provide this value)" + type = string + default = "" } variable "fluent_bit" { description = "Inputs for Fluent Bit configurations" - type = object({ - enable = bool - loki = optional(list(object({ + type = object({ + enable = bool + loki = optional(list(object({ host = string tenant_id = optional(string) labels = string port = optional(number) tls = optional(string) }))) - http = optional(list(object({ - host = string - port = optional(number) - uri = optional(string) - headers = optional(list(object({ + http = optional(list(object({ + host = string + port = optional(number) + uri = optional(string) + headers = optional(list(object({ key = string value = string }))) tls = optional(string) tls_verify = optional(string) }))) - splunk = optional(list(object({ + splunk = optional(list(object({ host = string token = string port = optional(number) @@ -622,36 +639,42 @@ variable "fluent_bit" { tls_verify = optional(string) }))) datadog = optional(list(object({ - host = string - api_key = string - tls = optional(string) - compress = optional(string) + host = string + api_key = string + tls = optional(string) + compress = optional(string) }))) new_relic = optional(list(object({ - host = optional(string) - api_key = string - compress = optional(string) + host = optional(string) + api_key = string + compress = optional(string) }))) - slack = optional(list(object({ - webhook = string + slack = optional(list(object({ + webhook = string }))) }) default = null } +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" +} + variable "acr_list" { description = "list of acr for cluster pull permission" - type = list(string) - default = [] + type = list(string) + default = [] } variable "log_analytics_workspace_enabled" { description = "enable azure log analytics" - type = bool - default = false + type = bool + default = false } -variable "cert_issuer_config" { +variable "cert_issuer_config"{ description = "email to be added as cert-manager issuer" type = object({ env = optional(string) @@ -671,10 +694,10 @@ variable "slack_alerts_configs" { variable "webhook_alerts_configs" { type = list(object({ - 
name = string - url = string + name = string + url = string send_resolved = optional(bool, true) - labels = optional(map(string)) + labels = optional(map(string)) })) default = [] } diff --git a/k8s/azure/namespace/badger-db.tf b/k8s/azure/namespace/badger-db.tf index 7d6087da..68cfef4a 100644 --- a/k8s/azure/namespace/badger-db.tf +++ b/k8s/azure/namespace/badger-db.tf @@ -1,7 +1,7 @@ locals { badger_db_volume_mounts_services = tomap({ for k, v in var.services : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) @@ -9,7 +9,7 @@ locals { badger_db_volume_mounts_crons = tomap({ for k, v in var.cron_jobs : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) diff --git a/k8s/azure/namespace/configmap.tf b/k8s/azure/namespace/configmap.tf index ea37c10f..c8354fc2 100644 --- a/k8s/azure/namespace/configmap.tf +++ b/k8s/azure/namespace/configmap.tf @@ -1,5 +1,6 @@ locals { - ssl = var.sql_db != null ? (var.sql_db.enable_ssl == null ? false : var.sql_db.enable_ssl) : false + ssl = var.sql_db != null ? (var.sql_db.enable_ssl == null ? false : var.sql_db.enable_ssl) : false + env = var.deploy_env != null ? var.deploy_env : var.app_env postgres_ssl = try(var.sql_db != null && var.sql_db.type == "postgresql" ? (var.sql_db.enable_ssl == null ? false : var.sql_db.enable_ssl) : false, false) } @@ -16,7 +17,7 @@ resource "kubernetes_config_map" "namespace_configs" { } resource "kubernetes_config_map" "service_configs" { - for_each = { for k, v in var.services : k => v } + for_each = {for k,v in var.services : k => v} metadata { name = "${each.key}-infra" namespace = kubernetes_namespace.app_environments.metadata[0].name @@ -24,19 +25,19 @@ resource "kubernetes_config_map" "service_configs" { data = merge( { - "APP_NAME" = each.key - "DB_NAME" = each.value.db_name != null ? each.value.db_name : each.value.datastore_configs != null ? each.value.datastore_configs.databse : null - "DB_USER" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_user["${var.namespace}-${each.value.db_name}"] : module.postgresql[0].db_user["${var.namespace}-${each.value.db_name}"] : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : module.postgres_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : null - "DB_DIALECT" = each.value.db_name != null ? (var.sql_db.type == "mysql" ? "mysql" : "postgres") : each.value.datastore_configs != null ? (each.value.datastore_configs.type == "mysql" ? "mysql" : "postgres") : null - "DB_HOST" = each.value.db_name != null ? "${var.namespace}-sql.db" : each.value.datastore_configs != null ? "${each.value.datastore_configs.name}-sql.db" : null - "DB_PORT" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_port : module.postgresql[0].db_port : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_port : module.postgres_v2[each.value.datastore_configs.name].db_port : null - "REDIS_HOST" = each.value.redis == true || each.value.local_redis == true ? (each.value.redis == true ? "${var.namespace}-redis" : "redis-master-master") : (each.value.redis_configs != null ? 
"${each.value.redis_configs.name}-${var.namespace}-redis" : null), - "REDIS_PORT" = each.value.redis == true || each.value.local_redis == true ? "6379" : (each.value.redis_configs != null ? each.value.redis_configs.port : null) - }) + "APP_NAME" = each.key + "DB_NAME" = each.value.db_name != null ? each.value.db_name : each.value.datastore_configs != null ? each.value.datastore_configs.databse : null + "DB_USER" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_user["${var.namespace}-${each.value.db_name}"] : module.postgresql[0].db_user["${var.namespace}-${each.value.db_name}"] : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : module.postgres_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : null + "DB_DIALECT" = each.value.db_name != null ? (var.sql_db.type == "mysql" ? "mysql" : "postgres") : each.value.datastore_configs != null ? (each.value.datastore_configs.type == "mysql" ? "mysql" : "postgres") : null + "DB_HOST" = each.value.db_name != null ? "${var.namespace}-sql.db" : each.value.datastore_configs != null ? "${each.value.datastore_configs.name}-sql.db" : null + "DB_PORT" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_port : module.postgresql[0].db_port : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_port : module.postgres_v2[each.value.datastore_configs.name].db_port : null + "REDIS_HOST" = each.value.redis == true || each.value.local_redis == true ? (each.value.redis == true ? "${var.namespace}-redis" : "redis-master-master") : (each.value.redis_configs != null ? "${each.value.redis_configs.name}-${var.namespace}-redis" : null), + "REDIS_PORT" = each.value.redis == true || each.value.local_redis == true ? "6379" : (each.value.redis_configs != null ? each.value.redis_configs.port : null) + }) } resource "kubernetes_config_map" "cron_jobs_configs" { - for_each = { for k, v in var.cron_jobs : k => v } + for_each = {for k,v in var.cron_jobs : k => v} metadata { name = "${each.key}-infra" namespace = kubernetes_namespace.app_environments.metadata[0].name @@ -44,19 +45,19 @@ resource "kubernetes_config_map" "cron_jobs_configs" { data = merge( { - "APP_NAME" = each.key - "DB_NAME" = each.value.db_name != null ? each.value.db_name : each.value.datastore_configs != null ? each.value.datastore_configs.databse : null - "DB_USER" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_user["${var.namespace}-${each.value.db_name}"] : module.postgresql[0].db_user["${var.namespace}-${each.value.db_name}"] : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : module.postgres_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : null - "DB_DIALECT" = each.value.db_name != null ? (var.sql_db.type == "mysql" ? "mysql" : "postgres") : each.value.datastore_configs != null ? (each.value.datastore_configs.type == "mysql" ? "mysql" : "postgres") : null - "DB_HOST" = each.value.db_name != null ? "${var.namespace}-sql.db" : each.value.datastore_configs != null ? 
"${each.value.datastore_configs.name}-sql.db" : null - "DB_PORT" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_port : module.postgresql[0].db_port : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_port : module.postgres_v2[each.value.datastore_configs.name].db_port : null - "REDIS_HOST" = each.value.redis == true || each.value.local_redis == true ? (each.value.redis == true ? "${var.namespace}-redis" : "redis-master-master") : (each.value.redis_configs != null ? "${each.value.redis_configs.name}-${var.namespace}-redis" : null), - "REDIS_PORT" = each.value.redis == true || each.value.local_redis == true ? "6379" : (each.value.redis_configs != null ? each.value.redis_configs.port : null) - }) + "APP_NAME" = each.key + "DB_NAME" = each.value.db_name != null ? each.value.db_name : each.value.datastore_configs != null ? each.value.datastore_configs.databse : null + "DB_USER" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_user["${var.namespace}-${each.value.db_name}"] : module.postgresql[0].db_user["${var.namespace}-${each.value.db_name}"] : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : module.postgres_v2[each.value.datastore_configs.name].db_user["${var.namespace}-${each.value.datastore_configs.databse}"] : null + "DB_DIALECT" = each.value.db_name != null ? (var.sql_db.type == "mysql" ? "mysql" : "postgres") : each.value.datastore_configs != null ? (each.value.datastore_configs.type == "mysql" ? "mysql" : "postgres") : null + "DB_HOST" = each.value.db_name != null ? "${var.namespace}-sql.db" : each.value.datastore_configs != null ? "${each.value.datastore_configs.name}-sql.db" : null + "DB_PORT" = each.value.db_name != null ? var.sql_db.type == "mysql" ? module.mysql[0].db_port : module.postgresql[0].db_port : each.value.datastore_configs != null ? each.value.datastore_configs.type == "mysql" ? module.mysql_v2[each.value.datastore_configs.name].db_port : module.postgres_v2[each.value.datastore_configs.name].db_port : null + "REDIS_HOST" = each.value.redis == true || each.value.local_redis == true ? (each.value.redis == true ? "${var.namespace}-redis" : "redis-master-master") : (each.value.redis_configs != null ? "${each.value.redis_configs.name}-${var.namespace}-redis" : null), + "REDIS_PORT" = each.value.redis == true || each.value.local_redis == true ? "6379" : (each.value.redis_configs != null ? each.value.redis_configs.port : null) + }) } resource "kubernetes_config_map" "env_service_configmap" { - for_each = var.services + for_each = var.services metadata { name = each.key @@ -68,7 +69,7 @@ resource "kubernetes_config_map" "env_service_configmap" { } resource "kubernetes_config_map" "env_cron_configmap" { - for_each = var.cron_jobs + for_each = var.cron_jobs metadata { name = each.key diff --git a/k8s/azure/namespace/kubernetes.tf b/k8s/azure/namespace/kubernetes.tf index d2ec0538..b58db3b5 100644 --- a/k8s/azure/namespace/kubernetes.tf +++ b/k8s/azure/namespace/kubernetes.tf @@ -3,19 +3,19 @@ locals { } module "remote_state_gcp_cluster" { - source = "../../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 
1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = local.cluster_prefix + source = "../../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = local.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../../remote-state/aws" - count = var.shared_services.type == "aws" ? 1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = local.cluster_prefix - location = var.shared_services.location + source = "../../../remote-state/aws" + count = var.shared_services.type == "aws" ? 1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = local.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { @@ -33,26 +33,26 @@ data "azurerm_kubernetes_cluster" "cluster" { } provider "kubectl" { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) token = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].cluster_host : (var.shared_services.type == "gcp" ? 
module.remote_state_gcp_cluster[0].cluster_host : module.remote_state_azure_cluster[0].cluster_host) load_config_file = false } provider "kubernetes" { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) } provider "helm" { kubernetes { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) } } \ No newline at end of file diff --git a/k8s/azure/namespace/nginx.tf b/k8s/azure/namespace/nginx.tf index 05ed92be..e1e03409 100644 --- a/k8s/azure/namespace/nginx.tf +++ b/k8s/azure/namespace/nginx.tf @@ -3,11 +3,11 @@ locals { default_domain_list = merge([ for service, service_config in var.services : { - (service) = { - ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] + "${ service }" = { + ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] basic_auth = (service_config.enable_basic_auth != null ? service_config.enable_basic_auth : false) ? true : false } - } if(coalesce(var.services[service].enable_default_ingress, false) == true) + } if (coalesce(var.services[service].enable_default_ingress, false) == true) ]...) service_custom_domain_list = merge([ @@ -23,12 +23,12 @@ locals { } # Exclude wildcard hosts from custom host logic if !can(regex("^\\*\\.", split("/", host)[0])) - }) if try(length(var.services[service].ingress_list), 0) != 0 + })if try(length(var.services[service].ingress_list),0) != 0 ]...) default_services_list = merge([ for service in keys(local.default_domain_list) : { - for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { + for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 
80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen @@ -43,50 +43,50 @@ locals { wildcard_custom_hosts = merge([ for service, config in var.services : tomap({ for host in try(config.ingress_list, []) : - "${service}-${var.namespace}-${host}" => { - service_name = split(":", service)[0] - service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] - ingress_host = split("/", host)[0] - ns = var.namespace - ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) - base_domain = replace(split("/", host)[0], "*.", "") - } - if can(regex("^\\*\\.", split("/", host)[0])) + "${service}-${var.namespace}-${host}" => { + service_name = split(":", service)[0] + service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] + ingress_host = split("/", host)[0] + ns = var.namespace + ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) + base_domain = replace(split("/", host)[0], "*.", "") + } + if can(regex("^\\*\\.", split("/", host)[0])) }) if try(length(config.ingress_list), 0) != 0 ]...) } resource "random_password" "basic_auth_password" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } length = 32 special = true override_special = "_@" } resource "random_string" "basic_auth_user_name_suffix" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - length = 6 - special = true - upper = false - numeric = false + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + length = 6 + special = true + upper = false + numeric = false min_special = 2 - lower = true + lower = true } resource "azurerm_key_vault_secret" "basic_auth_credentials" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - name = "${local.cluster_name}-${var.namespace}-${each.key}-basic-auth-credentials" - value = jsonencode({ user_name = "${each.key}-${random_string.basic_auth_user_name_suffix[each.key].result}", - password = random_password.basic_auth_password[each.key].result }) + name = "${local.cluster_name}-${var.namespace}-${each.key}-basic-auth-credentials" + value = jsonencode({ user_name = "${each.key}-${random_string.basic_auth_user_name_suffix[each.key].result}", + password = random_password.basic_auth_password[each.key].result }) key_vault_id = data.azurerm_key_vault.secrets.id } resource "kubernetes_secret_v1" "basic_auth_secret" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? 
v.enable_basic_auth : false} metadata { - name = "${each.key}-basic-auth" + name = "${each.key}-basic-auth" namespace = var.namespace } data = { @@ -96,15 +96,15 @@ resource "kubernetes_secret_v1" "basic_auth_secret" { } resource "kubernetes_ingress_v1" "default_service_ingress" { - for_each = { for service, value in local.default_services_list : service => value } + for_each = {for service, value in local.default_services_list : service => value } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -133,17 +133,17 @@ resource "kubernetes_ingress_v1" "default_service_ingress" { } resource "kubernetes_ingress_v1" "custom_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "cert-manager.io/issuer" = "letsencrypt" - "kubernetes.io/tls-acme" = "true" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "cert-manager.io/issuer" = "letsencrypt" + "kubernetes.io/tls-acme" = "true" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -164,15 +164,15 @@ resource "kubernetes_ingress_v1" "custom_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] } resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } metadata { name = each.value.ingress_name namespace = each.value.ns @@ -182,9 +182,9 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { "kubernetes.io/tls-acme" = "true" "nginx.ingress.kubernetes.io/use-regex" = "true" "nginx.ingress.kubernetes.io/rewrite-target" = "/$2" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" - "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? 
"Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -205,8 +205,8 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] diff --git a/k8s/azure/namespace/sql.tf b/k8s/azure/namespace/sql.tf index e181531b..21380dd0 100644 --- a/k8s/azure/namespace/sql.tf +++ b/k8s/azure/namespace/sql.tf @@ -1,6 +1,7 @@ locals { - db_list = distinct(concat(distinct([for key, value in var.services : value.db_name]), distinct([for key, value in var.cron_jobs : value.db_name]))) + enable_db = try(var.sql_db.enable, false) + db_list = distinct(concat(distinct([for key, value in var.services: value.db_name]), distinct([for key, value in var.cron_jobs: value.db_name]))) grouped_database_map = { for pair in concat( @@ -23,25 +24,25 @@ locals { } module "mysql" { - source = "../../../sql/azure-mysql" - resource_group_name = var.resource_group_name - location = var.app_region - - count = var.sql_db == null ? 0 : (var.sql_db.type == "mysql" ? 1 : 0) - - cluster_name = local.cluster_name - namespace = var.namespace - mysql_server_name = "${local.cluster_name}-${var.namespace}-mysql-server" - databases = local.db_list - sku_name = var.sql_db.sku_name != null ? var.sql_db.sku_name : "GP_Standard_D2ds_v4" - administrator_login = var.sql_db.admin_user != null ? var.sql_db.admin_user : "mysqladmin" - storage = var.sql_db.storage != null ? var.sql_db.storage : 20 - storage_scaling = var.sql_db.storage_scaling != null ? var.sql_db.storage_scaling : true - iops = var.sql_db.iops != null ? var.sql_db.iops : 360 - io_scaling_enabled = var.sql_db.iops_scaling != null ? var.sql_db.iops_scaling : false - read_replica = var.sql_db.read_replica != null ? var.sql_db.read_replica : false - key_vault_id = data.azurerm_key_vault.secrets.id - tags = local.common_tags + source = "../../../sql/azure-mysql" + resource_group_name = var.resource_group_name + location = var.app_region + + count = var.sql_db == null ? 0 : (var.sql_db.type == "mysql" ? 1 : 0) + + cluster_name = local.cluster_name + namespace = var.namespace + mysql_server_name = "${local.cluster_name}-${var.namespace}-mysql-server" + databases = local.db_list + sku_name = var.sql_db.sku_name != null ? var.sql_db.sku_name : "GP_Standard_D2ds_v4" + administrator_login = var.sql_db.admin_user != null ? var.sql_db.admin_user : "mysqladmin" + storage = var.sql_db.storage != null ? var.sql_db.storage : 20 + storage_scaling = var.sql_db.storage_scaling != null ? var.sql_db.storage_scaling : true + iops = var.sql_db.iops != null ? var.sql_db.iops : 360 + io_scaling_enabled = var.sql_db.iops_scaling != null ? var.sql_db.iops_scaling : false + read_replica = var.sql_db.read_replica != null ? 
var.sql_db.read_replica : false + key_vault_id = data.azurerm_key_vault.secrets.id + tags = local.common_tags } resource "kubernetes_service" "mysql_db_service" { @@ -62,26 +63,26 @@ resource "kubernetes_service" "mysql_db_service" { module "postgresql" { - source = "../../../sql/azure-postgres" - resource_group_name = var.resource_group_name - location = var.app_region - - count = var.sql_db == null ? 0 : (var.sql_db.type == "postgresql" ? 1 : 0) - - cluster_name = local.cluster_name - namespace = var.namespace - postgres_server_name = "${local.cluster_name}-${var.namespace}-postgres-server" - databases = local.db_list - sku_name = var.sql_db.sku_name != null ? var.sql_db.sku_name : "GP_Standard_D2s_v3" - administrator_login = var.sql_db.admin_user != null ? var.sql_db.admin_user : "postgresqladmin" - storage_mb = var.sql_db.storage != null ? var.sql_db.storage : 32768 - storage_scaling = var.sql_db.storage_scaling != null ? var.sql_db.storage_scaling : false - storage_tier = var.sql_db.storage_tier != null ? var.sql_db.storage_tier : "P4" - read_replica = var.sql_db.read_replica != null ? var.sql_db.read_replica : false - key_vault_id = data.azurerm_key_vault.secrets.id - enable_ssl = var.sql_db.enable_ssl != null ? var.sql_db.enable_ssl : false - - tags = merge(local.common_tags, + source = "../../../sql/azure-postgres" + resource_group_name = var.resource_group_name + location = var.app_region + + count = var.sql_db == null ? 0 : (var.sql_db.type == "postgresql" ? 1 : 0) + + cluster_name = local.cluster_name + namespace = var.namespace + postgres_server_name = "${local.cluster_name}-${var.namespace}-postgres-server" + databases = local.db_list + sku_name = var.sql_db.sku_name != null ? var.sql_db.sku_name : "GP_Standard_D2s_v3" + administrator_login = var.sql_db.admin_user != null ? var.sql_db.admin_user : "postgresqladmin" + storage_mb = var.sql_db.storage != null ? var.sql_db.storage : 32768 + storage_scaling = var.sql_db.storage_scaling != null ? var.sql_db.storage_scaling : false + storage_tier = var.sql_db.storage_tier != null ? var.sql_db.storage_tier : "P4" + read_replica = var.sql_db.read_replica != null ? var.sql_db.read_replica : false + key_vault_id = data.azurerm_key_vault.secrets.id + enable_ssl = var.sql_db.enable_ssl != null ? var.sql_db.enable_ssl : false + + tags = merge(local.common_tags, tomap({ "Name" = "${local.cluster_name}-${var.namespace}-postgres-server" }) @@ -105,29 +106,29 @@ resource "kubernetes_service" "postgresql_db_service" { } module "mysql_v2" { - source = "../../../sql/azure-mysql" - resource_group_name = var.resource_group_name - location = var.app_region + source = "../../../sql/azure-mysql" + resource_group_name = var.resource_group_name + location = var.app_region for_each = var.sql_list != null ? { for key, value in var.sql_list : key => value if value.type == "mysql" } : {} - - cluster_name = local.cluster_name - namespace = var.namespace - mysql_server_name = each.key - databases = try(local.database_map[each.key], []) - - sku_name = each.value.sku_name != null ? each.value.sku_name : "GP_Standard_D2ds_v4" - administrator_login = each.value.admin_user != null ? each.value.admin_user : "mysqladmin" - storage = each.value.storage != null ? each.value.storage : 20 - storage_scaling = each.value.storage_scaling != null ? each.value.storage_scaling : true - iops = each.value.iops != null ? each.value.iops : 360 - io_scaling_enabled = each.value.iops_scaling != null ? each.value.iops_scaling : false - read_replica = each.value.read_replica != null ? 
each.value.read_replica : false - multi_ds = true - key_vault_id = data.azurerm_key_vault.secrets.id - tags = local.common_tags + + cluster_name = local.cluster_name + namespace = var.namespace + mysql_server_name = each.key + databases = try(local.database_map[each.key], []) + + sku_name = each.value.sku_name != null ? each.value.sku_name : "GP_Standard_D2ds_v4" + administrator_login = each.value.admin_user != null ? each.value.admin_user : "mysqladmin" + storage = each.value.storage != null ? each.value.storage : 20 + storage_scaling = each.value.storage_scaling != null ? each.value.storage_scaling : true + iops = each.value.iops != null ? each.value.iops : 360 + io_scaling_enabled = each.value.iops_scaling != null ? each.value.iops_scaling : false + read_replica = each.value.read_replica != null ? each.value.read_replica : false + multi_ds = true + key_vault_id = data.azurerm_key_vault.secrets.id + tags = local.common_tags } @@ -135,7 +136,7 @@ resource "kubernetes_service" "mysql_db_service_v2" { for_each = var.sql_list != null ? { for key, value in var.sql_list : key => value if value.type == "mysql" } : {} - + metadata { name = "${each.key}-sql" namespace = "db" @@ -151,30 +152,30 @@ resource "kubernetes_service" "mysql_db_service_v2" { } module "postgres_v2" { - source = "../../../sql/azure-postgres" - resource_group_name = var.resource_group_name - location = var.app_region + source = "../../../sql/azure-postgres" + resource_group_name = var.resource_group_name + location = var.app_region for_each = var.sql_list != null ? { for key, value in var.sql_list : key => value if value.type == "postgresql" } : {} - cluster_name = local.cluster_name - namespace = var.namespace - postgres_server_name = each.key - databases = try(local.database_map[each.key], []) - - sku_name = each.value.sku_name != null ? each.value.sku_name : "GP_Standard_D2s_v3" - administrator_login = each.value.admin_user != null ? each.value.admin_user : "postgresqladmin" - storage_mb = each.value.storage != null ? each.value.storage : 32768 - storage_scaling = each.value.storage_scaling != null ? each.value.storage_scaling : false - storage_tier = each.value.storage_tier != null ? each.value.storage_tier : "P4" - read_replica = each.value.read_replica != null ? each.value.read_replica : false - key_vault_id = data.azurerm_key_vault.secrets.id - multi_ds = true - enable_ssl = each.value.enable_ssl != null ? each.value.enable_ssl : false - - tags = merge(local.common_tags, + cluster_name = local.cluster_name + namespace = var.namespace + postgres_server_name = each.key + databases = try(local.database_map[each.key], []) + + sku_name = each.value.sku_name != null ? each.value.sku_name : "GP_Standard_D2s_v3" + administrator_login = each.value.admin_user != null ? each.value.admin_user : "postgresqladmin" + storage_mb = each.value.storage != null ? each.value.storage : 32768 + storage_scaling = each.value.storage_scaling != null ? each.value.storage_scaling : false + storage_tier = each.value.storage_tier != null ? each.value.storage_tier : "P4" + read_replica = each.value.read_replica != null ? each.value.read_replica : false + key_vault_id = data.azurerm_key_vault.secrets.id + multi_ds = true + enable_ssl = each.value.enable_ssl != null ? 
each.value.enable_ssl : false + + tags = merge(local.common_tags, tomap({ "Name" = each.key }) diff --git a/k8s/azure/namespace/vars.tf b/k8s/azure/namespace/vars.tf index 58482cae..51052b5b 100644 --- a/k8s/azure/namespace/vars.tf +++ b/k8s/azure/namespace/vars.tf @@ -4,15 +4,27 @@ variable "resource_group_name" { default = "" } +variable "storage_account_name" { + description = "Name of the storage account" + type = string + default = "" +} + +variable "container_name" { + description = "Name of the container which store tfstate files" + type = string + default = "" +} + variable "app_name" { - description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." - type = string + description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." + type = string } variable "app_env" { - description = "Application deployment environment." - type = string - default = "" + description = "Application deployment environment." + type = string + default = "" } variable "namespace" { @@ -22,20 +34,20 @@ variable "namespace" { } variable "app_region" { - type = string + type = string description = "Location where the resources to be created" - default = "" + default = "" } variable "user_access" { description = "List of users who will have access to clusters" type = object({ - admins = optional(list(string)) + admins = optional(list(string)) viewers = optional(list(string)) editors = optional(list(string)) }) default = { - admins = [] + admins = [] viewers = [] editors = [] } @@ -48,57 +60,63 @@ variable "accessibility" { }) } +variable "public_ingress" { + description = "Whether ingress is public or not." + type = string + default = false +} + variable "cron_jobs" { description = "Map of cron jobs to be executed within the namespace" - type = map(object({ - repo_name = optional(string) - acr_name = optional(string) - acr_resource_group = optional(string) - db_name = optional(string) - redis = optional(bool) - local_redis = optional(bool) - service_account = optional(string) - custom_secrets = optional(list(string)) - ingress_list = optional(list(string)) + type = map(object({ + repo_name = optional(string) + acr_name = optional(string) + acr_resource_group = optional(string) + db_name = optional(string) + redis = optional(bool) + local_redis = optional(bool) + service_account= optional(string) + custom_secrets = optional(list(string)) + ingress_list = optional(list(string)) enable_basic_auth = optional(bool) enable_default_ingress = optional(bool) - badger_db = optional(bool) + badger_db = optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) databse = optional(string) - type = optional(string) + type = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) image = optional(string) schedule = string suspend = optional(bool) concurrency_policy = optional(string) - http_port = optional(string) - metrics_port = optional(string) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - env = optional(map(any)) - env_list = optional(list(object({ + http_port = optional(string) + metrics_port = optional(string) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = 
optional(string) + max_memory = optional(string) + env = optional(map(any)) + env_list = optional(list(object({ name = string value = string }))) - command = optional(list(string)) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + command = optional(list(string)) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -109,87 +127,87 @@ variable "cron_jobs" { })) })) })) - default = {} + default = {} } variable "services" { description = "Map of services to be deployed within the namespace" - type = map(object({ - repo_name = optional(string) - acr_name = optional(string) - acr_resource_group = optional(string) - db_name = optional(string) - redis = optional(bool) - local_redis = optional(bool) + type = map(object({ + repo_name = optional(string) + acr_name = optional(string) + acr_resource_group = optional(string) + db_name = optional(string) + redis = optional(bool) + local_redis = optional(bool) enable_default_ingress = optional(bool) - ingress_list = optional(list(string)) - custom_secrets = optional(list(string)) + ingress_list = optional(list(string)) + custom_secrets = optional(list(string)) enable_basic_auth = optional(bool) - badger_db = optional(bool) + badger_db = optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) databse = optional(string) - type = optional(string) + type = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) image = optional(string) - replica_count = optional(number) - cli_service = optional(bool) - http_port = optional(string) - metrics_port = optional(string) - ports = optional(map(any)) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - min_available = optional(number) - heartbeat_url = optional(string) - env = optional(map(any)) - env_list = optional(list(object({ + replica_count = optional(number) + cli_service = optional(bool) + http_port = optional(string) + metrics_port = optional(string) + ports = optional(map(any)) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + min_available = optional(number) + heartbeat_url = optional(string) + env = optional(map(any)) + env_list = optional(list(object({ name = string value = string }))) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - hpa = optional(object({ - enable = optional(bool) - min_replicas = optional(number) - max_replicas = optional(number) - cpu_limit = optional(number) - memory_limit = optional(number) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + hpa = optional(object({ + enable = optional(bool) + min_replicas = optional(number) + max_replicas = optional(number) + cpu_limit = optional(number) + memory_limit = optional(number) })) command = optional(list(string)) - readiness_probes = optional(object({ + readiness_probes = 
optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - liveness_probes = optional(object({ + liveness_probes = optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - pvc = optional(map(object({ + pvc = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -204,16 +222,16 @@ variable "services" { })) })) })) - default = {} + default = {} } variable "cassandra_db" { description = "Inputs to provision Cassandra instances" - type = object({ + type = object({ admin_user = string replica_count = number persistence_size = number - }) + }) default = null } @@ -224,20 +242,20 @@ variable "common_tags" { } variable "sql_db" { - description = "Inputs to provision sql instances" - type = object( + description = "Inputs to provision sql instances" + type = object( { - type = string - sku_name = optional(string) - admin_user = optional(string) - enable_ssl = optional(bool) - read_replica = optional(bool) - storage = optional(number) + type = string + sku_name = optional(string) + admin_user = optional(string) + enable_ssl = optional(bool) + read_replica = optional(bool) + storage = optional(number) storage_scaling = optional(bool) storage_tier = optional(string) iops = optional(number) iops_scaling = optional(bool) - }) + }) default = null } @@ -247,9 +265,23 @@ variable "custom_namespace_secrets" { default = [] } +variable "ingress_custom_domain" { + description = "Map for k8 ingress for custom domain." + type = map(any) + default = {} + # below is example value + # ingress_custom_domain = { + # acme = [{ ---> namespace + # service = "acme-challenge" ---> service name + # domain = "*.test1.shgw.link" ---> custom domain name + # name = "shgw.link" ---> this should be unique name + # }] + # } +} + variable "local_redis" { description = "Inputs to provision Redis instance within the cluster as a statefulset." 
- type = object( + type = object( { enable = bool disk_size = optional(string) @@ -276,20 +308,38 @@ variable "helm_charts" { default = {} } +variable "deploy_env" { + description = "Deployment environment" + type = string + default = null +} + +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" +} + variable "shared_services" { type = object({ - type = string - bucket = string - profile = optional(string) - location = optional(string) - resource_group = optional(string) + type = string + bucket = string + profile = optional(string) + location = optional(string) + resource_group = optional(string) storage_account = optional(string) - container = optional(string) - cluster_prefix = optional(string) + container = optional(string) + cluster_prefix = optional(string) }) } -variable "cert_issuer_config" { +variable "vpc" { + description = "VPC the apps are going to use" + type = string + default = "" +} + +variable "cert_issuer_config"{ description = "email to be added as cert-manager issuer" type = object({ env = optional(string) @@ -298,18 +348,18 @@ variable "cert_issuer_config" { } variable "sql_list" { type = map(object({ - type = optional(string) - sku_name = optional(string) - admin_user = optional(string) - storage = optional(number) - storage_scaling = optional(bool) - iops = optional(number) - storage_tier = optional(string) - iops_scaling = optional(bool) - read_replica = optional(bool) - disk_autoresize = optional(string) - disk_size = optional(string) - enable_ssl = optional(bool) + type = optional(string) + sku_name = optional(string) + admin_user = optional(string) + storage = optional(number) + storage_scaling = optional(bool) + iops = optional(number) + storage_tier = optional(string) + iops_scaling = optional(bool) + read_replica = optional(bool) + disk_autoresize = optional(string) + disk_size = optional(string) + enable_ssl = optional(bool) })) default = null } \ No newline at end of file diff --git a/k8s/azure/nginx/vars.tf b/k8s/azure/nginx/vars.tf index 33db6a6a..bc9a53d4 100644 --- a/k8s/azure/nginx/vars.tf +++ b/k8s/azure/nginx/vars.tf @@ -1,8 +1,12 @@ +variable "node_port" { + description = "Node Port on which to expose kong." + type = number +} variable app_name { - type = string + type = string description = "Name of AKS cluster" - default = "" + default = "" } variable "node_resource_group" { @@ -12,7 +16,7 @@ variable "node_resource_group" { variable "lb_ip" { description = "Static IP address to attach to loadbalancer" - type = string + type = string } variable "prometheus_enabled" { diff --git a/k8s/gcp/gke/appdynamics.tf b/k8s/gcp/gke/appdynamics.tf index f3c10b5a..364e6ad6 100644 --- a/k8s/gcp/gke/appdynamics.tf +++ b/k8s/gcp/gke/appdynamics.tf @@ -3,7 +3,7 @@ resource "helm_release" "app_dynamics" { count = var.appd_controller_url == "" || var.appd_controller_url == "" || var.appd_account == "" || var.appd_user == "" || var.appd_password == "" || var.appd_accesskey == "" ? 0 : 1 chart = "cluster-agent" name = "cluster-agent" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name version = "0.1.18" repository = "https://ciscodevnet.github.io/appdynamics-charts" diff --git a/k8s/gcp/gke/fluentbit.tf b/k8s/gcp/gke/fluentbit.tf index 5e78010a..af3db6d4 100644 --- a/k8s/gcp/gke/fluentbit.tf +++ b/k8s/gcp/gke/fluentbit.tf @@ -1,31 +1,31 @@ locals { - fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? 
var.fluent_bit.enable : false) : false - fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? var.fluent_bit.loki : []) : [] - fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] - fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []) : [] - fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []) : [] - fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []) : [] - fluent_bit_slack = local.fluent_bit_enable ? (var.fluent_bit.slack != null ? var.fluent_bit.slack : []) : [] + fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? var.fluent_bit.enable : false): false + fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? var.fluent_bit.loki : []) : [] + fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] + fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []): [] + fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []): [] + fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []): [] + fluent_bit_slack = local.fluent_bit_enable ? (var.fluent_bit.slack != null ? var.fluent_bit.slack : []): [] fluent_bit_loki_outputs = concat([ - for k, v in local.fluent_bit_loki : { + for k,v in local.fluent_bit_loki : { host = v.host tenant_id = v.tenant_id != null ? v.tenant_id : "" labels = v.labels port = v.port != null ? v.port : 3100 - tls = v.tls != null ? v.tls : "On" + tls = v.tls != null ? v.tls : "On" } if length(local.fluent_bit_loki) > 0 - ], local.enable_loki ? [{ + ], local.enable_loki ? [{ host = "loki-distributor.loki" tenant_id = random_uuid.grafana_standard_datasource_header_value.result labels = "namespace=$kubernetes['namespace_name'],pod=$kubernetes['pod_name'],service=$kubernetes['container_name'],cluster=${local.cluster_name}" port = 3100 tls = "Off" - }] : []) + }]: []) fluent_bit_http_outputs = [ - for k, v in local.fluent_bit_http : { + for k,v in local.fluent_bit_http : { host = v.host port = v.port != null ? v.port : 80 uri = v.uri != null ? v.uri : "/" @@ -36,7 +36,7 @@ locals { ] fluent_bit_splunk_outputs = [ - for k, v in local.fluent_bit_splunk : { + for k,v in local.fluent_bit_splunk : { host = v.host token = v.token port = v.port != null ? v.port : 8088 @@ -46,34 +46,34 @@ locals { ] fluent_bit_datadog_outputs = [ - for k, v in local.fluent_bit_datadog : { - host = v.host - api_key = v.api_key - tls = v.tls != null ? v.tls : "On" - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_datadog : { + host = v.host + api_key = v.api_key + tls = v.tls != null ? v.tls : "On" + compress = v.compress != null ? v.compress : "gzip" } if length(local.fluent_bit_datadog) > 0 ] fluent_bit_newrelic_outputs = [ - for k, v in local.fluent_bit_newrelic : { - host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" - api_key = v.api_key - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_newrelic : { + host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" + api_key = v.api_key + compress = v.compress != null ? 
v.compress : "gzip" } if length(local.fluent_bit_newrelic) > 0 ] fluent_bit_slack_outputs = [ - for k, v in local.fluent_bit_slack : { - webhook = v.webhook + for k,v in local.fluent_bit_slack : { + webhook = v.webhook } if length(local.fluent_bit_slack) > 0 ] } -data template_file "fluent-bit" { +data template_file "fluent-bit"{ count = local.fluent_bit_enable ? 1 : 0 template = file("./templates/fluent-bit-values.yaml") - vars = { + vars = { "CLUSTER_NAME" = local.cluster_name "SERVICE_ACCOUNT" = "serviceAccount:${data.google_project.this.number}-compute@developer.gserviceaccount.com" "GCP_REGION" = var.app_region @@ -85,27 +85,27 @@ data template_file "fluent-bit" { "READ_FROM_HEAD" = "Off" "READ_FROM_TAIL" = "On" - fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) - fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) - fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) - fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) + fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) + fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) + fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) + fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) fluent_bit_newrelic_outputs = jsonencode(local.fluent_bit_newrelic_outputs) - fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) + fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) } } resource "helm_release" "fluentbit-config" { - count = local.fluent_bit_enable ? 1 : 0 + count = local.fluent_bit_enable ? 1 : 0 repository = "https://fluent.github.io/helm-charts" chart = "fluent-bit" name = "fluent-bit" version = "0.35.0" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name values = [ data.template_file.fluent-bit[0].rendered ] - depends_on = [ - kubernetes_namespace.monitoring - ] +depends_on = [ + kubernetes_namespace.monitoring +] } \ No newline at end of file diff --git a/k8s/gcp/gke/grafana-dashboard.tf b/k8s/gcp/gke/grafana-dashboard.tf index 4d565c48..9a38deeb 100644 --- a/k8s/gcp/gke/grafana-dashboard.tf +++ b/k8s/gcp/gke/grafana-dashboard.tf @@ -1,16 +1,16 @@ locals { - grafana_auth = local.prometheus_enable && local.grafana_enable ? "grafana-admin:${random_password.observability_admin[0].result}" : "" - folder_creation = false + grafana_auth = local.prometheus_enable && local.grafana_enable ? "grafana-admin:${random_password.observability_admin[0].result}" : "" + folder_creation = false grafana_enabled_users = local.grafana_enable && try(var.observability_config.grafana != null ? (var.observability_config.grafana.enabled_users != null ? var.observability_config.grafana.enabled_users : false) : false, false) grafana_dashboard_folder = local.folder_creation ? 
{ - Kong = ["kong-official"] - Partner_Standard_API = ["partner-standard-api"] - Disk_Utilization = ["cortex-disk-utilization", "prometheus-disk-utilization"] + Kong = ["kong-official"] + Partner_Standard_API = ["partner-standard-api"] + Disk_Utilization = ["cortex-disk-utilization", "prometheus-disk-utilization"] } : {} folder_map = [ - for key, value in local.grafana_dashboard_folder : { + for key, value in local.grafana_dashboard_folder : { folder = key dashboards = value } @@ -18,13 +18,13 @@ locals { dashboard_map = merge([ for key, value in local.folder_map : { - for dashboard in value.dashboards : "${value.folder}-${dashboard}" => { + for dashboard in value.dashboards : "${value.folder}-${dashboard}" => { folder = value.folder dashboard = dashboard } } ]...) - + role_map = { app_admins = "Admin" app_editors = "Editor" @@ -40,12 +40,15 @@ locals { ] ]) + users_with_roles_map = { + for user in local.users_with_roles : user.email => user + } } resource "null_resource" "wait_for_grafana" { provisioner "local-exec" { - on_failure = "continue" - command = <<-EOT + on_failure = "continue" + command = <<-EOT #!/bin/bash DOMAIN_NAME="${local.domain_name}" @@ -111,23 +114,23 @@ resource "null_resource" "wait_for_grafana" { } resource "random_password" "admin_passwords" { - for_each = local.grafana_enabled_users ? coalesce(toset(var.user_access.app_admins), toset([])) : toset([]) - length = 12 - special = true + for_each = local.grafana_enabled_users ? coalesce(toset(var.user_access.app_admins), toset([])) : toset([]) + length = 12 + special = true override_special = "$" } resource "random_password" "editor_passwords" { - for_each = local.grafana_enabled_users ? coalesce(toset(var.user_access.app_editors), toset([])) : toset([]) - length = 12 - special = true + for_each = local.grafana_enabled_users ? coalesce(toset(var.user_access.app_editors), toset([])) : toset([]) + length = 12 + special = true override_special = "$" } resource "random_password" "viewer_passwords" { - for_each = local.grafana_enabled_users ? coalesce(toset(var.user_access.app_viewers), toset([])) : toset([]) - length = 12 - special = true + for_each = local.grafana_enabled_users ? coalesce(toset(var.user_access.app_viewers), toset([])) : toset([]) + length = 12 + special = true override_special = "$" } @@ -139,7 +142,7 @@ resource "grafana_user" "admins" { password = random_password.admin_passwords[each.key].result is_admin = true - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_user" "editors" { @@ -150,7 +153,7 @@ resource "grafana_user" "editors" { password = random_password.editor_passwords[each.key].result is_admin = false - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_user" "viewers" { @@ -161,7 +164,7 @@ resource "grafana_user" "viewers" { password = random_password.viewer_passwords[each.key].result is_admin = false - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_folder" "dashboard_folder" { @@ -178,11 +181,11 @@ resource "grafana_dashboard" "dashboard" { } resource "grafana_api_key" "admin_token" { - count = local.grafana_enabled_users ? local.grafana_enable ? 1 : 0 : 0 - name = "terraform-admin-token" - role = "Admin" + count = local.grafana_enabled_users ? local.grafana_enable ? 
1 : 0 : 0 + name = "terraform-admin-token" + role = "Admin" - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "null_resource" "update_user_roles" { @@ -191,7 +194,7 @@ resource "null_resource" "update_user_roles" { } : {} provisioner "local-exec" { - command = < v } + for_each = {for k,v in local.grafana_datasource_list : k => v} metadata { - name = "grafana-${each.key}-datasource" + name = "grafana-${each.key}-datasource" namespace = helm_release.grafana[0].namespace labels = { grafana_datasource = "1" @@ -87,10 +87,10 @@ resource "kubernetes_config_map" "grafana_custom_datasource" { data = { "datasource.yaml" = templatefile("${path.module}/templates/grafana-custom-datasource.yaml", { - tempo_datasource = local.enable_tempo - loki_datasource = local.enable_loki - mimir_datasource = local.enable_mimir - datasource_name = each.key + tempo_datasource = local.enable_tempo + loki_datasource = local.enable_loki + mimir_datasource = local.enable_mimir + datasource_name = each.key datasource_header_value = each.value } ) @@ -103,7 +103,7 @@ resource "random_uuid" "grafana_standard_datasource_header_value" { resource "kubernetes_config_map" "grafana_standard_datasource" { count = local.prometheus_enable && local.grafana_enable ? 1 : 0 metadata { - name = "grafana-standard-datasource" + name = "grafana-standard-datasource" namespace = helm_release.grafana[0].namespace labels = { grafana_datasource = "1" @@ -113,17 +113,17 @@ resource "kubernetes_config_map" "grafana_standard_datasource" { data = { "datasource.yaml" = templatefile("./templates/grafana-standard-datasource.yaml", { - datasource_name = local.cluster_name + datasource_name = local.cluster_name datasource_header_value = random_uuid.grafana_standard_datasource_header_value.result - project_id = var.provider_id - svc_account_id = try(google_service_account.cloud_monitoring_svc_acc[0].email, "") - key = local.private_key - gcloud_monitoring = local.enable_gcloud_monitoring - mimir_create = local.enable_mimir - loki_create = local.enable_loki - tempo_create = local.enable_tempo - cortex_create = local.enable_cortex - prometheus_create = local.prometheus_enable + project_id = var.provider_id + svc_account_id = try(google_service_account.cloud_monitoring_svc_acc[0].email,"") + key = local.private_key + gcloud_monitoring = local.enable_gcloud_monitoring + mimir_create = local.enable_mimir + loki_create = local.enable_loki + tempo_create = local.enable_tempo + cortex_create = local.enable_cortex + prometheus_create = local.prometheus_enable } ) } @@ -134,44 +134,44 @@ resource "kubernetes_config_map" "grafana_service_dashboard" { metadata { name = "grafana-service-dashboard" namespace = helm_release.grafana[0].namespace - labels = { + labels = { grafana_dashboard = "1" } } data = { - "kong.json" = file("./templates/kong-official.json") - "cronjob.json" = file("./templates/cronjob.json") - "partner-standard-api.json" = file("./templates/partner-standard-api.json") - "cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") - "prometheus-disk-utilization.json" = file("./templates/prometheus-disk-utilization.json") + "kong.json" = file("./templates/kong-official.json") + "cronjob.json" = file("./templates/cronjob.json") + "partner-standard-api.json" = file("./templates/partner-standard-api.json") + "cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") + "prometheus-disk-utilization.json" = 
file("./templates/prometheus-disk-utilization.json") } } data "google_secret_manager_secret_version" "oauth_client_id" { - count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 - secret = "${local.cluster_name}-oauth-client-id" + count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + secret = "${local.cluster_name}-oauth-client-id" } data "google_secret_manager_secret_version" "oauth_client_secret" { - count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 - secret = "${local.cluster_name}-oauth-client-secret" + count = local.prometheus_enable && local.grafana_enable ? (var.observability_config.grafana.configs != null ? (var.observability_config.grafana.configs.enable_sso != null ? 1 : 0) : 0) : 0 + secret = "${local.cluster_name}-oauth-client-secret" } resource "google_secret_manager_secret" "observability_admin" { - count = local.grafana_enable ? 1 : 0 - provider = google-beta - project = var.provider_id - secret_id = "${local.cluster_name}-grafana-admin-secret" - labels = local.common_tags + count = local.grafana_enable ? 1 : 0 + provider = google-beta + project = var.provider_id + secret_id = "${local.cluster_name}-grafana-admin-secret" + labels = local.common_tags replication { - automatic = true + automatic = true } } resource "google_secret_manager_secret_version" "observability_admin" { - count = local.grafana_enable ? 1 : 0 + count = local.grafana_enable ? 1 : 0 secret = google_secret_manager_secret.observability_admin[0].id secret_data = random_password.observability_admin[0].result @@ -179,7 +179,7 @@ resource "google_secret_manager_secret_version" "observability_admin" { } resource "google_secret_manager_secret_iam_member" "observability_admin" { - count = local.grafana_enable ? 1 : 0 + count = local.grafana_enable ? 1 : 0 project = var.provider_id secret_id = google_secret_manager_secret.observability_admin[0].id @@ -189,7 +189,7 @@ resource "google_secret_manager_secret_iam_member" "observability_admin" { resource "google_service_account" "cloud_monitoring_svc_acc" { - count = local.enable_gcloud_monitoring ? 1 : 0 + count = local.enable_gcloud_monitoring ? 1 : 0 project = var.provider_id account_id = "${local.cluster_service_account_name}-monitoring" display_name = "${local.cluster_name} gcloud monitoring" @@ -197,57 +197,57 @@ resource "google_service_account" "cloud_monitoring_svc_acc" { } resource "google_service_account_key" "cloud_monitoring_svc_acc" { - count = local.enable_gcloud_monitoring ? 1 : 0 + count = local.enable_gcloud_monitoring ? 1 : 0 service_account_id = google_service_account.cloud_monitoring_svc_acc[0].email } resource "google_secret_manager_secret" "cloud_monitoring_svc_acc" { - count = local.enable_gcloud_monitoring ? 1 : 0 - provider = google-beta - project = var.provider_id - secret_id = "${local.cluster_name}-cloud-monitoring-svc-acc-secret" - labels = local.common_tags + count = local.enable_gcloud_monitoring ? 
1 : 0 + provider = google-beta + project = var.provider_id + secret_id = "${local.cluster_name}-cloud-monitoring-svc-acc-secret" + labels = local.common_tags replication { - automatic = true + automatic = true } } resource "google_secret_manager_secret_version" "cloud_monitoring_svc_acc" { - count = local.enable_gcloud_monitoring ? 1 : 0 - secret = google_secret_manager_secret.cloud_monitoring_svc_acc[0].id - secret_data = base64decode(google_service_account_key.cloud_monitoring_svc_acc[0].private_key) - depends_on = [google_secret_manager_secret.cloud_monitoring_svc_acc[0]] + count = local.enable_gcloud_monitoring ? 1 : 0 + secret = google_secret_manager_secret.cloud_monitoring_svc_acc[0].id + secret_data = base64decode(google_service_account_key.cloud_monitoring_svc_acc[0].private_key) + depends_on = [google_secret_manager_secret.cloud_monitoring_svc_acc[0]] } resource "google_project_iam_member" "cloud_monitoring_svc_acc_cluster" { - count = local.enable_gcloud_monitoring ? 1 : 0 - project = var.provider_id - role = "roles/monitoring.viewer" - member = "serviceAccount:${google_service_account.cloud_monitoring_svc_acc[0].email}" + count = local.enable_gcloud_monitoring ? 1 : 0 + project = var.provider_id + role = "roles/monitoring.viewer" + member = "serviceAccount:${google_service_account.cloud_monitoring_svc_acc[0].email}" } module "sql_db" { - count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 1 : 0, 0) - - source = "../../../sql/gcp-sql" - - project_id = var.provider_id - project_number = data.google_project.this.number - region = var.app_region - vpc_name = data.google_compute_network.vpc.self_link - cluster_name = local.cluster_name - namespace = "monitoring" - sql_name = "${local.cluster_name}-monitoring-sql-db" - sql_type = "postgresql" - databases = ["grafana"] - machine_type = "db-f1-micro" - disk_size = 10 - availability_type = "ZONAL" - deletion_protection = local.grafana_db_deletion_protection - read_replica = false - activation_policy = "ALWAYS" - labels = local.common_tags - enable_ssl = false + count = try(local.grafana_enable && var.observability_config.grafana.persistence.type == "db" ? 
1 : 0, 0) + + source = "../../../sql/gcp-sql" + + project_id = var.provider_id + project_number = data.google_project.this.number + region = var.app_region + vpc_name = data.google_compute_network.vpc.self_link + cluster_name = local.cluster_name + namespace = "monitoring" + sql_name = "${local.cluster_name}-monitoring-sql-db" + sql_type = "postgresql" + databases = ["grafana"] + machine_type = "db-f1-micro" + disk_size = 10 + availability_type = "ZONAL" + deletion_protection = local.grafana_db_deletion_protection + read_replica = false + activation_policy = "ALWAYS" + labels = local.common_tags + enable_ssl = false } \ No newline at end of file diff --git a/k8s/gcp/gke/helm.tf b/k8s/gcp/gke/helm.tf index dede5c54..74fa8577 100644 --- a/k8s/gcp/gke/helm.tf +++ b/k8s/gcp/gke/helm.tf @@ -1,5 +1,5 @@ data "google_container_cluster" "gke" { - name = module.gke.name + name = module.gke.name location = var.app_region } @@ -7,8 +7,8 @@ provider "helm" { kubernetes { host = "https://${module.gke.endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(module.gke.ca_certificate) } } @@ -17,7 +17,7 @@ provider "kubectl" { load_config_file = false host = "https://${module.gke.endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(module.gke.ca_certificate) } \ No newline at end of file diff --git a/k8s/gcp/gke/kubernetes.tf b/k8s/gcp/gke/kubernetes.tf index b4773722..609add6f 100644 --- a/k8s/gcp/gke/kubernetes.tf +++ b/k8s/gcp/gke/kubernetes.tf @@ -8,7 +8,7 @@ provider "kubernetes" { host = "https://${module.gke.endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(module.gke.ca_certificate) } \ No newline at end of file diff --git a/k8s/gcp/gke/main.tf b/k8s/gcp/gke/main.tf index df95fad4..c4878217 100644 --- a/k8s/gcp/gke/main.tf +++ b/k8s/gcp/gke/main.tf @@ -4,7 +4,7 @@ data "google_project" "this" { data "google_client_config" "default" {} data "google_compute_network" "vpc" { - name = var.vpc + name = var.vpc } data "google_compute_subnetwork" "app_subnet" { @@ -13,35 +13,37 @@ data "google_compute_subnetwork" "app_subnet" { } resource "random_string" "cluster_svc_account" { - length = 16 - numeric = true - lower = true - upper = false - special = false + length = 16 + numeric = true + lower = true + upper = false + special = false } locals { cluster_name = var.app_env == "" ? 
var.app_name : "${var.app_name}-${var.app_env}" - namespaces = [for namespace in var.namespace_folder_list : split("/", namespace)[0]] + cluster_name_parts = split("-", local.cluster_name) + environment = var.app_env + namespaces = [for namespace in var.namespace_folder_list : split("/", namespace)[0]] node_port = 32443 # Node port which will be used by LB for exposure - cidr_blocks = try(var.accessibility.cidr_blocks != null ? var.accessibility.cidr_blocks : ["10.0.0.0/8"], ["10.0.0.0/8"]) + cidr_blocks = try(var.accessibility.cidr_blocks != null ? var.accessibility.cidr_blocks : ["10.0.0.0/8"], ["10.0.0.0/8"] ) cluster_networks = concat([ for cidr in local.cidr_blocks : { - cidr_block = cidr + cidr_block = cidr display_name = "${cidr} cidr block" } ]) - cluster_service_account_name = regex("[a-z][-a-z0-9]{4,29}", random_string.cluster_svc_account.result) + cluster_service_account_name = regex("[a-z][-a-z0-9]{4,29}", random_string.cluster_svc_account.result ) - common_tags = merge(var.common_tags, + common_tags = merge(var.common_tags, tomap({ - project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name, local.cluster_name) + project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name ,local.cluster_name) provisioner = try(var.standard_tags.provisioner != null ? var.standard_tags.provisioner : "zop-dev", "zop-dev") - })) - - enable_monitoring_node_pool = try(var.monitoring_node_config.enable_monitoring_node_pool != null ? var.monitoring_node_config.enable_monitoring_node_pool : false, false) + })) + + enable_monitoring_node_pool = try(var.monitoring_node_config.enable_monitoring_node_pool != null ? var.monitoring_node_config.enable_monitoring_node_pool: false, false) } module "gke" { @@ -65,8 +67,8 @@ module "gke" { release_channel = "UNSPECIFIED" deletion_protection = var.cluster_deletion_protection - cluster_autoscaling = { - enabled = false + cluster_autoscaling = { + enabled = false autoscaling_profile = "BALANCED" max_cpu_cores = 0 min_cpu_cores = 0 @@ -80,41 +82,41 @@ module "gke" { master_authorized_networks = local.cluster_networks node_pools = concat( - [{ - name = "node-pool" - image_type = "ubuntu_containerd" - machine_type = var.node_config.node_type - min_count = var.node_config.min_count - max_count = var.node_config.max_count - service_account = "${data.google_project.this.number}-compute@developer.gserviceaccount.com" - }], - local.enable_monitoring_node_pool ? [{ - name = "monitoring-pool" - image_type = "ubuntu_containerd" - machine_type = try(var.monitoring_node_config.node_type, "e2-standard-2") - min_count = try(var.monitoring_node_config.min_count, 1) - max_count = try(var.monitoring_node_config.max_count, 1) - service_account = "${data.google_project.this.number}-compute@developer.gserviceaccount.com" - }] : [] + [{ + name = "node-pool" + image_type = "ubuntu_containerd" + machine_type = var.node_config.node_type + min_count = var.node_config.min_count + max_count = var.node_config.max_count + service_account = "${data.google_project.this.number}-compute@developer.gserviceaccount.com" + }], + local.enable_monitoring_node_pool ? 
[{ + name = "monitoring-pool" + image_type = "ubuntu_containerd" + machine_type = try(var.monitoring_node_config.node_type, "e2-standard-2") + min_count = try(var.monitoring_node_config.min_count, 1) + max_count = try(var.monitoring_node_config.max_count, 1) + service_account = "${data.google_project.this.number}-compute@developer.gserviceaccount.com" + }] : [] ) - node_pools_labels = { - monitoring-pool = { - role = "monitoring" + node_pools_labels = { + monitoring-pool = { + role = "monitoring" + } } - } - node_pools_taints = { - "monitoring-pool" = [ - { - key = "workload" - value = "monitoring" - effect = "NO_SCHEDULE" - } - ] - } + node_pools_taints = { + "monitoring-pool" = [ + { + key = "workload" + value = "monitoring" + effect = "NO_SCHEDULE" + } + ] + } - node_pools_oauth_scopes = { + node_pools_oauth_scopes = { "${local.cluster_name}-node-pool" = [ "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/logging.write", diff --git a/k8s/gcp/gke/nginx.tf b/k8s/gcp/gke/nginx.tf index a3680a1d..6f89cdfa 100644 --- a/k8s/gcp/gke/nginx.tf +++ b/k8s/gcp/gke/nginx.tf @@ -1,6 +1,10 @@ +data "google_compute_zones" "available_zones" { + project = var.provider_id + region = var.app_region +} module "nginx" { - project = var.provider_id + project = var.provider_id source = "../nginx" diff --git a/k8s/gcp/gke/prometheus.tf b/k8s/gcp/gke/prometheus.tf index 4e927dd4..2f75eee5 100644 --- a/k8s/gcp/gke/prometheus.tf +++ b/k8s/gcp/gke/prometheus.tf @@ -19,21 +19,21 @@ resource "kubernetes_secret" "prometheus_remote_write_auth" { type = "Opaque" } -locals { +locals{ ### this app namespace level alerts: - namespace_teams_webhook = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "teams" } if s.alert_webhooks != null]...) - namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = v.data, labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "google_chat" } if s.alert_webhooks != null]...) + namespace_teams_webhook = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "teams"}if s.alert_webhooks != null]...) + namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = v.data, labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "google_chat"}if s.alert_webhooks != null]...) ### this is cluster level alerts: - cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data, 8, length(val.data)), labels = val.labels == null ? 
{ severity = "critical", servicealert = "true" } : val.labels, } if val.type == "teams" } - cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "moogsoft" } - cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "pagerduty" } - cluster_google_chat_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "google_chat" } - cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) - cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) - cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => { url = val.url, channel = val.channel, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } - cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => { url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } - google_chat_alerts = merge(local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) + cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data,8 ,length(val.data) ),labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "teams"} + cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "moogsoft"} + cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "pagerduty"} + cluster_google_chat_alerts= jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => {data = val.data, labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "google_chat"} + cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) + cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) + cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => {url = val.url, channel = val.channel,labels = val.labels == null ? 
{severity = "critical", servicealert = "true"} : val.labels, }} + cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => {url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, }} + google_chat_alerts = merge( local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) # Create secrets for user-provided remote write configs with basic auth prometheus_remote_write_secrets = try(var.observability_config.prometheus.remote_write, null) != null ? { @@ -53,9 +53,9 @@ locals { ] : [] default_remote_write_config = local.enable_mimir && local.prometheus_enable ? [{ - host = "http://mimir-distributor.mimir:8080/api/v1/push" - key = "X-Scope-OrgID" - value = random_uuid.grafana_standard_datasource_header_value.result + host = "http://mimir-distributor.mimir:8080/api/v1/push" + key = "X-Scope-OrgID" + value = random_uuid.grafana_standard_datasource_header_value.result secret_name = null }] : [] @@ -66,32 +66,32 @@ data "template_file" "prom_template" { count = local.prometheus_enable ? 1 : 0 template = file("./templates/prometheus-values.yaml") - vars = { - PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") - PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? var.observability_config.prometheus.persistence.retention_size : "20GB", "20GB") - PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "7d", "7d") - CLUSTER_NAME = local.cluster_name - REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) - ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false - MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true - MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? false : true - MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) - MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key - MOOGSOFT_USERNAME = var.moogsoft_username - teams_webhook_alerts = jsonencode(local.cluster_alerts) - cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) - cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) - GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true - SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true - WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true - GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) - SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) - WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) - PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true - PAGER_DUTY_KEY = var.pagerduty_integration_key - PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) - GRAFANA_HOST = local.grafana_enable ? 
local.grafana_host : "" - USE_MONITORING_NODE_POOL = try(local.enable_monitoring_node_pool != null ? local.enable_monitoring_node_pool : false, false) + vars = { + PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") + PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? var.observability_config.prometheus.persistence.retention_size : "20GB", "20GB") + PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "7d", "7d") + CLUSTER_NAME = local.cluster_name + REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) + ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false + MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true + MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? false : true + MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) + MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key + MOOGSOFT_USERNAME = var.moogsoft_username + teams_webhook_alerts = jsonencode(local.cluster_alerts) + cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) + cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) + GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true + SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true + WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true + GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) + SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) + WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) + PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true + PAGER_DUTY_KEY = var.pagerduty_integration_key + PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) + GRAFANA_HOST = local.grafana_enable ? local.grafana_host : "" + USE_MONITORING_NODE_POOL = try(local.enable_monitoring_node_pool != null ? local.enable_monitoring_node_pool : false, false) } } @@ -102,7 +102,7 @@ resource "helm_release" "prometheus" { chart = "kube-prometheus-stack" name = "prometheus" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name create_namespace = true version = try(var.observability_config.prometheus.version != null ? var.observability_config.prometheus.version : "60.0.0", "60.0.0") timeout = 1200 @@ -115,7 +115,7 @@ resource "helm_release" "prometheus" { } resource "helm_release" "alerts_teams" { - count = local.prometheus_enable && local.grafana_enable ? (jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? 0 : 1) : 0 + count = local.prometheus_enable && local.grafana_enable ? (jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? 
0 : 1 ) : 0 repository = "https://prometheus-msteams.github.io/prometheus-msteams" chart = "prometheus-msteams" @@ -130,20 +130,20 @@ resource "helm_release" "alerts_teams" { data "template_file" "cluster-alerts" { template = file("./templates/cluster-level-alerts.yaml") - vars = { + vars = { cluster_memory_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_underutilisation != null ? var.cluster_alert_thresholds.memory_underutilisation : 20) - cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) - cluster_node_count_max_value = local.enable_monitoring_node_pool ? var.monitoring_node_config.max_count : var.node_config.max_count - cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) - cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count : 80) - cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? var.cluster_alert_thresholds.cpu_utilisation : 80) - cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation : 20) - cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization : 80) - cluster_name = local.cluster_name - cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) - nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold : 5) - cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) - prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) + cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) + cluster_node_count_max_value = local.enable_monitoring_node_pool ? var.monitoring_node_config.max_count : var.node_config.max_count + cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) + cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count: 80) + cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? 
var.cluster_alert_thresholds.cpu_utilisation: 80) + cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation: 20) + cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization: 80) + cluster_name = local.cluster_name + cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) + nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold: 5) + cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) + prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) } } diff --git a/k8s/gcp/gke/vars.tf b/k8s/gcp/gke/vars.tf index b0409012..5a792635 100644 --- a/k8s/gcp/gke/vars.tf +++ b/k8s/gcp/gke/vars.tf @@ -1,737 +1,759 @@ variable "app_name" { - description = "This is the name for the cluster. This name is also used to namespace all the other resources created by this module." - type = string + description = "This is the name for the cluster. This name is also used to namespace all the other resources created by this module." + type = string } variable "app_env" { - description = "Application deployment environment." - type = string - default = "" + description = "Application deployment environment." + type = string + default = "" } variable "app_region" { - description = "Cloud region to deploy to (e.g. us-east1)" - type = string + description = "Cloud region to deploy to (e.g. 
us-east1)" + type = string } variable "provider_id" { - description = "ID of the GCP project" - type = string + description = "ID of the GCP project" + type = string } variable "vpc" { - description = "VPC the apps are going to use" - type = string - default = "" + description = "VPC the apps are going to use" + type = string + default = "" } variable "subnet" { - description = "Subnets IDs the apps are going to use" - type = string - default = "" + description = "Subnets IDs the apps are going to use" + type = string + default = "" } variable "user_access" { - description = "List of users who will have access to clusters" - type = object({ - app_admins = optional(list(string)) - app_viewers = optional(list(string)) - app_editors = optional(list(string)) - }) - default = { - app_admins = [] - app_viewers = [] - app_editors = [] - } + description = "List of users who will have access to clusters" + type = object({ + app_admins = optional(list(string)) + app_viewers = optional(list(string)) + app_editors = optional(list(string)) + }) + default = { + app_admins = [] + app_viewers = [] + app_editors = [] + } } variable "node_config" { - description = "List of values for the node configuration of kubernetes cluster" - type = object({ - node_type = string - min_count = number - max_count = number - availability_zones = optional(list(string)) - }) - validation { - condition = (var.node_config.min_count > 0) - error_message = "The variable kube_node_count_min must be greater than 0." - } - validation { - condition = (var.node_config.max_count < 30) - error_message = "The variable kube_node_count_max value must less than 30." - } + description = "List of values for the node configuration of kubernetes cluster" + type = object({ + node_type = string + min_count = number + max_count = number + availability_zones = optional(list(string)) + }) + validation { + condition = (var.node_config.min_count > 0) + error_message = "The variable kube_node_count_min must be greater than 0." + } + validation { + condition = (var.node_config.max_count < 30) + error_message = "The variable kube_node_count_max value must less than 30." + } } variable "monitoring_node_config" { - description = "List of values for the node configuration of kubernetes cluster" - type = object({ - enable_monitoring_node_pool = optional(bool) - node_type = optional(string) - min_count = optional(number) - max_count = optional(number) - availability_zones = optional(list(string)) - }) - default = null + description = "List of values for the node configuration of kubernetes cluster" + type = object({ + enable_monitoring_node_pool = optional(bool) + node_type = optional(string) + min_count = optional(number) + max_count = optional(number) + availability_zones = optional(list(string)) + }) + default = null } variable "appd_controller_url" { - description = "AppDynamics Controller URL." - type = string - default = "" + description = "AppDynamics Controller URL." + type = string + default = "" } variable "appd_account" { - description = "AppDynamics Account." - type = string - default = "" + description = "AppDynamics Account." + type = string + default = "" } variable "appd_user" { - description = "AppDynamics Username." - type = string - default = "" + description = "AppDynamics Username." + type = string + default = "" } variable "appd_password" { - description = "AppDynamics Password." - type = string - default = "" + description = "AppDynamics Password." 
+ type = string + default = "" } variable "appd_accesskey" { - description = "AppDynamics Accesskey." - type = string - default = "" + description = "AppDynamics Accesskey." + type = string + default = "" } variable "app_namespaces" { - description = "Details for setting up of different types of alerts at namespace level." - type = map(object({ - alert_webhooks = optional(list(object({ - type = string - data = string - labels = optional(map(string)) - }))) - })) - default = {} + description = "Details for setting up of different types of alerts at namespace level." + type = map(object({ + alert_webhooks = optional(list(object({ + type = string + data = string + labels = optional(map(string)) + }))) + })) + default = {} } variable "moogsoft_endpoint_api_key" { - description = "Moogsoft API key to configure your third-party system to send data to Moogsoft." - type = string - default = "" + description = "Moogsoft API key to configure your third-party system to send data to Moogsoft." + type = string + default = "" } variable "moogsoft_username" { - description = "Username for moogsoft authentication" - type = string - default = "" + description = "Username for moogsoft authentication" + type = string + default = "" } variable "cluster_alert_webhooks" { - description = "Details for setting up of different types of alerts." - type = list(object({ - type = string - data = string - labels = optional(map(string)) - })) - default = [] - - # example variable - # cluster_alert_webhooks = [ - # { - # type = "teams", ---> teams, moogsoft etc - # data = "https://zop.webhook.office.com/webhookb2/a22c241c-63f9-498c-b688-ac26b18d4b65@1113e38c-6dd4-428c-811d-24932bc2d5de/IncomingWebhook/c788d456400a4b399b4f191111da8c3fb/ea9e1aa2-6b1f-41e-8afe-fd5539f2bb8b" - # }, - # { - # type = "moogsoft", ---> teams, moogsoft etc - # data = "https://zop.moogsoft.qa/prometheus" - # } - # - # ] + description = "Details for setting up of different types of alerts." + type = list(object({ + type = string + data = string + labels = optional(map(string)) + })) + default = [] + + # example variable + # cluster_alert_webhooks = [ + # { + # type = "teams", ---> teams, moogsoft etc + # data = "https://zop.webhook.office.com/webhookb2/a22c241c-63f9-498c-b688-ac26b18d4b65@1113e38c-6dd4-428c-811d-24932bc2d5de/IncomingWebhook/c788d456400a4b399b4f191111da8c3fb/ea9e1aa2-6b1f-41e-8afe-fd5539f2bb8b" + # }, + # { + # type = "moogsoft", ---> teams, moogsoft etc + # data = "https://zop.moogsoft.qa/prometheus" + # } + # + # ] } variable "accessibility" { - description = "The list of user access for the account setup" - type = object({ - domain_name = optional(string) - hosted_zone = optional(string) - cidr_blocks = optional(list(string)) - }) + description = "The list of user access for the account setup" + type = object({ + domain_name = optional(string) + hosted_zone = optional(string) + cidr_blocks = optional(list(string)) + }) } variable "cluster_alert_thresholds" { - description = "Cluster alerts threshold configuration." 
- type = object({ - cpu_utilisation = optional(number) - cpu_underutilisation = optional(number) - node_count = optional(number) - memory_utilisation = optional(number) - memory_underutilisation = optional(number) - pod_count = optional(number) - nginx_5xx_percentage_threshold = optional(number) - disk_utilization = optional(number) - cortex_disk_utilization_threshold = optional(number) - prometheus_disk_utilization_threshold = optional(number) - }) - default = { - cpu_utilisation = 80 - cpu_underutilisation = 20 - node_count = 80 - memory_utilisation = 80 - memory_underutilisation = 20 - pod_count = 80 - nginx_5xx_percentage_threshold = 5 - disk_utilization = 20 - cortex_disk_utilization_threshold = 80 - prometheus_disk_utilization_threshold = 80 - } + description = "Cluster alerts threshold configuration." + type = object({ + cpu_utilisation = optional(number) + cpu_underutilisation = optional(number) + node_count = optional(number) + memory_utilisation = optional(number) + memory_underutilisation = optional(number) + pod_count = optional(number) + nginx_5xx_percentage_threshold = optional(number) + disk_utilization = optional(number) + cortex_disk_utilization_threshold = optional(number) + prometheus_disk_utilization_threshold = optional(number) + }) + default = { + cpu_utilisation = 80 + cpu_underutilisation = 20 + node_count = 80 + memory_utilisation = 80 + memory_underutilisation = 20 + pod_count = 80 + nginx_5xx_percentage_threshold = 5 + disk_utilization = 20 + cortex_disk_utilization_threshold = 80 + prometheus_disk_utilization_threshold = 80 + } } +variable "custom_inbound_ip_range" { + description = "list of custom ip range that are allowed to access services on GKE cluster" + type = list + default = [] +} +variable "pagerduty_url" { + description = "Pagerduty URL to configure your third-party system to send data to Pagerduty" + type = string + default = "" +} variable "pagerduty_integration_key" { - description = "Pagerduty Integration key to send data to Pagerduty" - type = string - default = "" + description = "Pagerduty Integration key to send data to Pagerduty" + type = string + default = "" } variable "namespace_folder_list" { - description = "List of Namespaces configured in the cluster" - type = list(string) - default = [] + description = "List of Namespaces configured in the cluster" + type = list(string) + default = [] +} + +variable "cluster_config" { + description = "Configurations on Cluster" + type = map(any) + default = {} } variable "standard_tags" { - description = "standard tags for resources" - type = object({ - project = optional(string) - provisioner = optional(string) - }) - default = null + description = "standard tags for resources" + type = object ({ + project = optional(string) + provisioner = optional(string) + }) + default = null } variable "common_tags" { - description = "additional tags for merging with common tags" - type = map(string) - default = {} + description = "additional tags for merging with common tags" + type = map(string) + default = {} } variable "observability_config" { - description = "All the configuration related to observability(e.g prometheus, grafana, loki, tempo and cortex)" - type = object({ - suffix = optional(string) - prometheus = optional(object({ - version = optional(string) - enable = bool - persistence = optional(object({ - disk_size = optional(string) - retention_size = optional(string) - retention_duration = optional(string) - })) - remote_write = optional(list(object({ - host = optional(string) - header = 
optional(object({ - key = optional(string) - value = optional(string) - })) - username = optional(string) - password = optional(string) - }))) - })) - grafana = optional(object({ - version = optional(string) - enable = bool - url = optional(string) - min_replica = optional(number) - max_replica = optional(number) - request_memory = optional(string) - request_cpu = optional(string) - dashboard = optional(object({ - limit_memory = optional(string) - limit_cpu = optional(string) - request_memory = optional(string) - request_cpu = optional(string) - })) - datasource = optional(object({ - limit_memory = optional(string) - limit_cpu = optional(string) - request_memory = optional(string) - request_cpu = optional(string) - })) - persistence = optional(object({ - type = optional(string) - disk_size = optional(string) - deletion_protection = optional(string) - })) - configs = optional(object({ - datasource_list = optional(map(any)) - domains = optional(list(string)) - enable_sso = optional(bool) - })) - gcloud_monitoring = optional(bool) - enabled_users = optional(bool) - })) - kubernetes_event_exporter = optional(object({ - enable = bool - log_level = optional(string) - max_event_age_second = optional(string) - loki_receivers = optional(list(object({ - name = string - url = string - header = optional(object({ - key = string - value = string - })) - cluster_id = optional(string) - }))) - webhook_receivers = optional(list(object({ - name = string - type = string - url = string - header = optional(object({ - key = string - value = string + description = "All the configuration related to observability(e.g prometheus, grafana, loki, tempo and cortex)" + type = object({ + suffix = optional(string) + prometheus = optional(object({ + version = optional(string) + enable = bool + persistence = optional(object({ + disk_size = optional(string) + retention_size = optional(string) + retention_duration = optional(string) + })) + remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ + key = optional(string) + value = optional(string) + })) + username = optional(string) + password = optional(string) + }))) })) - }))) - resource = optional(object({ - limit_cpu = optional(string) - limit_memory = optional(string) - request_cpu = optional(string) - request_memory = optional(string) - })) - })) - loki = optional(object({ - enable = bool - enable_ingress = optional(bool) - alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) - distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) - })) - ingester = optional(object({ - replicas = optional(number) - max_memory = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - min_cpu = optional(string) - autoscaling = optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) - memory_utilization = optional(string) - cpu_utilization = optional(string) - })) - distributor = optional(object({ - replicas = optional(number) - max_memory = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - min_cpu = optional(string) - autoscaling = optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) 
- memory_utilization = optional(string) - cpu_utilization = optional(string) - })) - querier = optional(object({ - replicas = optional(number) - max_unavailable = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - autoscaling = optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) - memory_utilization = optional(string) - cpu_utilization = optional(string) - })) - query_frontend = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - autoscaling = optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) - memory_utilization = optional(string) - cpu_utilization = optional(string) - })) - })) - cortex = optional(object({ - enable = bool - enable_ingress = optional(bool) - limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_series_per_metric = optional(number) - max_series_per_user = optional(number) - max_fetched_chunks_per_query = optional(number) - })) - query_range = optional(object({ - memcached_client_timeout = optional(string) - })) - compactor = optional(object({ - enable = optional(bool) - replicas = optional(number) - persistence_volume = optional(object({ - enable = optional(bool) - size = optional(string) + grafana = optional(object({ + version = optional(string) + enable = bool + url = optional(string) + min_replica = optional(number) + max_replica = optional(number) + request_memory = optional(string) + request_cpu = optional(string) + dashboard = optional(object({ + limit_memory = optional(string) + limit_cpu = optional(string) + request_memory = optional(string) + request_cpu = optional(string) + })) + datasource = optional(object({ + limit_memory = optional(string) + limit_cpu = optional(string) + request_memory = optional(string) + request_cpu = optional(string) + })) + persistence = optional(object({ + type = optional(string) + disk_size = optional(string) + deletion_protection = optional(string) + })) + configs = optional(object({ + datasource_list = optional(map(any)) + domains = optional(list(string)) + enable_sso = optional(bool) + })) + gcloud_monitoring = optional(bool) + enabled_users = optional(bool) })) - min_cpu = optional(string) - max_cpu = optional(string) - min_memory = optional(string) - max_memory = optional(string) - })) - ingester = optional(object({ - replicas = optional(number) - persistence_volume = optional(object({ - size = optional(string) + kubernetes_event_exporter = optional(object({ + enable = bool + log_level = optional(string) + max_event_age_second = optional(string) + loki_receivers = optional(list(object({ + name = string + url = string + header = optional(object({ + key = string + value = string + })) + cluster_id = optional(string) + }))) + webhook_receivers = optional(list(object({ + name = string + type = string + url = string + header = optional(object({ + key = string + value = string + })) + }))) + resource = optional(object({ + limit_cpu = optional(string) + limit_memory = optional(string) + request_cpu = optional(string) + request_memory = optional(string) + })) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - autoscaling = optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) - memory_utilization = 
optional(string) - })) - querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - autoscaling = optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) - memory_utilization = optional(string) - cpu_utilization = optional(string) - })) - query_frontend = optional(object({ - replicas = optional(number) - enable = optional(bool) - })) - store_gateway = optional(object({ - replication_factor = optional(number) - replicas = optional(number) - persistence_volume = optional(object({ - size = optional(string) + loki = optional(object({ + enable = bool + enable_ingress = optional(bool) + alerts = optional(object({ + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) + distributor_appended_failures = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) + })) + ingester = optional(object({ + replicas = optional(number) + max_memory = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + min_cpu = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + })) + distributor = optional(object({ + replicas = optional(number) + max_memory = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + min_cpu = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + })) + querier = optional(object({ + replicas = optional(number) + max_unavailable = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + })) + query_frontend = optional(object({ + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + })) })) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - - })) - memcached_frontend = optional(object({ - enable = optional(bool) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - })) - memcached_blocks_index = optional(object({ - enable = optional(bool) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - })) - memcached_blocks = optional(object({ - enable = optional(bool) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - })) - memcached_blocks_metadata = optional(object({ - enable = optional(bool) - min_memory = optional(string) - 
min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - })) - distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - autoscaling = optional(bool) - min_replicas = optional(number) - max_replicas = optional(number) - memory_utilization = optional(string) - cpu_utilization = optional(string) - })) - })) - mimir = optional(object({ - enable = bool - enable_ingress = optional(bool) - alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) - })) - limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_fetched_chunks_per_query = optional(number) - max_cache_freshness = optional(string) - max_outstanding_requests_per_tenant = optional(number) - })) - compactor = optional(object({ - replicas = optional(number) - persistence_volume = optional(object({ - enable = optional(bool) - size = optional(string) + cortex = optional(object({ + enable = bool + enable_ingress = optional(bool) + limits = optional(object({ + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_series_per_metric = optional(number) + max_series_per_user = optional(number) + max_fetched_chunks_per_query = optional(number) + })) + query_range = optional(object({ + memcached_client_timeout = optional(string) + })) + compactor = optional(object({ + enable = optional(bool) + replicas = optional(number) + persistence_volume = optional(object({ + enable = optional(bool) + size = optional(string) + })) + min_cpu = optional(string) + max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + })) + ingester = optional(object({ + replicas = optional(number) + persistence_volume = optional(object({ + size = optional(string) + })) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + })) + querier = optional(object({ + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + })) + query_frontend = optional(object({ + replicas = optional(number) + enable = optional(bool) + })) + store_gateway = optional(object({ + replication_factor = optional(number) + replicas = optional(number) + persistence_volume = optional(object({ + size = optional(string) + })) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + + })) + memcached_frontend = optional(object({ + enable = optional(bool) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + })) + memcached_blocks_index = optional(object({ + enable = optional(bool) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + })) + memcached_blocks = optional(object({ + enable = optional(bool) 
+ min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + })) + memcached_blocks_metadata = optional(object({ + enable = optional(bool) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + })) + distributor = optional(object({ + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + autoscaling = optional(bool) + min_replicas = optional(number) + max_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + })) })) - min_cpu = optional(string) - max_cpu = optional(string) - min_memory = optional(string) - max_memory = optional(string) - })) - ingester = optional(object({ - replicas = optional(number) - persistence_volume = optional(object({ - size = optional(string) + mimir = optional(object({ + enable = bool + enable_ingress = optional(bool) + alerts = optional(object({ + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) + })) + limits = optional(object({ + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_fetched_chunks_per_query = optional(number) + max_cache_freshness = optional(string) + max_outstanding_requests_per_tenant = optional(number) + })) + compactor = optional(object({ + replicas = optional(number) + persistence_volume = optional(object({ + enable = optional(bool) + size = optional(string) + })) + min_cpu = optional(string) + max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + })) + ingester = optional(object({ + replicas = optional(number) + persistence_volume = optional(object({ + size = optional(string) + })) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + })) + querier = optional(object({ + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + })) + query_frontend = optional(object({ + replicas = optional(number) + })) + store_gateway = optional(object({ + replication_factor = optional(number) + replicas = optional(number) + persistence_volume = optional(object({ + size = optional(string) + })) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + })) + distributor = optional(object({ + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + })) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - })) - querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - })) - query_frontend = optional(object({ - replicas = optional(number) - })) - store_gateway = optional(object({ - replication_factor = optional(number) - replicas = optional(number) - persistence_volume = optional(object({ - size = optional(string) + tempo = optional(object({ + enable = bool + enable_ingress = optional(bool) + alerts = optional(object({ + ingester_bytes_received = 
optional(number) + distributor_ingester_appends = optional(number) + distributor_ingester_append_failures = optional(number) + ingester_live_traces = optional(number) + distributor_spans_received = optional(number) + distributor_bytes_received = optional(number) + ingester_blocks_flushed = optional(number) + tempodb_blocklist = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) + })) + max_receiver_msg_size = optional(number) + ingester = optional(object({ + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + })) + distributor = optional(object({ + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + autoscaling = optional(bool) + max_replicas = optional(number) + min_replicas = optional(number) + memory_utilization = optional(string) + cpu_utilization = optional(string) + + })) + querier = optional(object({ + replicas = optional(number) + })) + query_frontend = optional(object({ + replicas = optional(number) + })) + metrics_generator = optional(object({ + enable = optional(bool) + replicas = optional(number) + service_graphs_max_items = optional(number) + service_graphs_wait = optional(string) + remote_write_flush_deadline = optional(string) + remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ + key = optional(string) + value = optional(string) + })) + }))) + metrics_ingestion_time_range_slack = optional(string) + })) })) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - })) - distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - })) - })) - tempo = optional(object({ - enable = bool - enable_ingress = optional(bool) - alerts = optional(object({ - ingester_bytes_received = optional(number) - distributor_ingester_appends = optional(number) - distributor_ingester_append_failures = optional(number) - ingester_live_traces = optional(number) - distributor_spans_received = optional(number) - distributor_bytes_received = optional(number) - ingester_blocks_flushed = optional(number) - tempodb_blocklist = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) - })) - max_receiver_msg_size = optional(number) - ingester = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - autoscaling = optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) - memory_utilization = optional(string) - cpu_utilization = optional(string) - })) - distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - autoscaling = 
optional(bool) - max_replicas = optional(number) - min_replicas = optional(number) - memory_utilization = optional(string) - cpu_utilization = optional(string) - - })) - querier = optional(object({ - replicas = optional(number) - })) - query_frontend = optional(object({ - replicas = optional(number) - })) - metrics_generator = optional(object({ - enable = optional(bool) - replicas = optional(number) - service_graphs_max_items = optional(number) - service_graphs_wait = optional(string) - remote_write_flush_deadline = optional(string) - remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ - key = optional(string) - value = optional(string) - })) - }))) - metrics_ingestion_time_range_slack = optional(string) - })) - })) - openobserve = optional(list(object({ - enable = bool - name = string - replicaCount = optional(number, 2) - min_cpu = optional(string, "500m") - max_cpu = optional(string, "1") - min_memory = optional(string, "512Mi") - max_memory = optional(string, "1Gi") - enable_ingress = optional(bool, false) - env = optional(list(object({ - name = string - value = string - })), []) - })), []) - }) - default = null + openobserve = optional(list(object({ + enable = bool + name = string + replicaCount = optional(number, 2) + min_cpu = optional(string, "500m") + max_cpu = optional(string, "1") + min_memory = optional(string, "512Mi") + max_memory = optional(string, "1Gi") + enable_ingress = optional(bool, false) + env = optional(list(object({ + name = string + value = string + })), []) + })), []) + }) + default = null +} + +variable "provisioner" { + description = "Provisioner being used to setup Infra" + type = string + default = "zop-dev" } variable "shared_service_provider" { - description = "Shared Service Provider ID" - type = string + description = "Shared Service Provider ID" + type = string } -variable "cert_issuer_config" { - description = "email to be added as cert-manager issuer" - type = object({ - env = optional(string) - email = string - }) +variable "cert_issuer_config"{ + description = "email to be added as cert-manager issuer" + type = object({ + env = optional(string) + email = string + }) } variable "fluent_bit" { - description = "Inputs for Fluent Bit configurations" - type = object({ - enable = string - loki = optional(list(object({ - host = string - tenant_id = optional(string) - labels = string - port = optional(number) - tls = optional(string) - }))) - http = optional(list(object({ - host = string - port = optional(number) - uri = optional(string) - headers = optional(list(object({ - key = string - value = string - }))) - tls = optional(string) - tls_verify = optional(string) - }))) - splunk = optional(list(object({ - host = string - token = string - port = optional(number) - tls = optional(string) - tls_verify = optional(string) - }))) - datadog = optional(list(object({ - host = string - api_key = string - tls = optional(string) - compress = optional(string) - }))) - new_relic = optional(list(object({ - host = optional(string) - api_key = string - compress = optional(string) - }))) - slack = optional(list(object({ - webhook = string - }))) - }) - default = null + description = "Inputs for Fluent Bit configurations" + type = object({ + enable = string + loki = optional(list(object({ + host = string + tenant_id = optional(string) + labels = string + port = optional(number) + tls = optional(string) + }))) + http = optional(list(object({ + host = string + port = optional(number) + uri = optional(string) + headers = 
optional(list(object({ + key = string + value = string + }))) + tls = optional(string) + tls_verify = optional(string) + }))) + splunk = optional(list(object({ + host = string + token = string + port = optional(number) + tls = optional(string) + tls_verify = optional(string) + }))) + datadog = optional(list(object({ + host = string + api_key = string + tls = optional(string) + compress = optional(string) + }))) + new_relic = optional(list(object({ + host = optional(string) + api_key = string + compress = optional(string) + }))) + slack = optional(list(object({ + webhook = string + }))) + }) + default = null } variable "cluster_deletion_protection" { - type = bool - default = false + type = bool + default = false } variable "slack_alerts_configs" { - type = list(object({ - channel = string - name = string - url = string - labels = optional(map(string)) - })) - default = [] + type = list(object({ + channel = string + name = string + url = string + labels = optional(map(string)) + })) + default = [] } variable "webhook_alerts_configs" { - type = list(object({ - name = string - url = string - send_resolved = optional(bool, true) - labels = optional(map(string)) - })) - default = [] + type = list(object({ + name = string + url = string + send_resolved = optional(bool, true) + labels = optional(map(string)) + })) + default = [] } variable "karpenter_configs" { - description = "Inputs for karpenter - enabling flag, GCP machine types, and capacity types ('on-demand' or 'spot')" - - type = object({ - enable = bool - machine_types = list(string) - capacity_types = list(string) - }) - default = { - enable = false - machine_types = [] - capacity_types = [] - } - - validation { - condition = alltrue([ - for t in var.karpenter_configs.capacity_types : - contains(["on-demand", "spot"], t) - ]) - error_message = "Capacity type can only be either 'on-demand' or 'spot'" - } + description = "Inputs for karpenter - enabling flag, GCP machine types, and capacity types ('on-demand' or 'spot')" + + type = object({ + enable = bool + machine_types = list(string) + capacity_types = list(string) + }) + default = { + enable = false + machine_types = [] + capacity_types = [] + } + + validation { + condition = alltrue([ + for t in var.karpenter_configs.capacity_types : + contains(["on-demand", "spot"], t) + ]) + error_message = "Capacity type can only be either 'on-demand' or 'spot'" + } } \ No newline at end of file diff --git a/k8s/gcp/namespace/badger-db.tf b/k8s/gcp/namespace/badger-db.tf index 7d6087da..68cfef4a 100644 --- a/k8s/gcp/namespace/badger-db.tf +++ b/k8s/gcp/namespace/badger-db.tf @@ -1,7 +1,7 @@ locals { badger_db_volume_mounts_services = tomap({ for k, v in var.services : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) @@ -9,7 +9,7 @@ locals { badger_db_volume_mounts_crons = tomap({ for k, v in var.cron_jobs : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) diff --git a/k8s/gcp/namespace/kubernetes.tf b/k8s/gcp/namespace/kubernetes.tf index a084e9cb..71c20501 100644 --- a/k8s/gcp/namespace/kubernetes.tf +++ b/k8s/gcp/namespace/kubernetes.tf @@ -2,10 +2,10 @@ locals { cluster_prefix = var.cluster_prefix != "" ? 
var.cluster_prefix : "${var.provider_id}/${var.app_env}/${var.app_name}" } data "terraform_remote_state" "infra_output" { - backend = "gcs" + backend = "gcs" config = { - bucket = var.bucket_name - prefix = "${local.cluster_prefix}/terraform.tfstate" + bucket = var.bucket_name + prefix = "${local.cluster_prefix}/terraform.tfstate" } } @@ -24,8 +24,8 @@ data "google_container_cluster" "gke" { provider "kubernetes" { host = "https://${data.terraform_remote_state.infra_output.outputs.kubernetes_endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(data.terraform_remote_state.infra_output.outputs.ca_certificate) } @@ -33,8 +33,8 @@ provider "kubectl" { load_config_file = false host = "https://${data.terraform_remote_state.infra_output.outputs.kubernetes_endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(data.terraform_remote_state.infra_output.outputs.ca_certificate) } @@ -42,8 +42,8 @@ provider "helm" { kubernetes { host = "https://${data.terraform_remote_state.infra_output.outputs.kubernetes_endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(data.terraform_remote_state.infra_output.outputs.ca_certificate) } } diff --git a/k8s/gcp/namespace/nginx.tf b/k8s/gcp/namespace/nginx.tf index 64b4b7d5..b8a9cdb8 100644 --- a/k8s/gcp/namespace/nginx.tf +++ b/k8s/gcp/namespace/nginx.tf @@ -4,42 +4,42 @@ locals { default_domain_list = merge([ for service, service_config in var.services : { - (service) = { - ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] + "${ service }" = { + ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] basic_auth = (service_config.enable_basic_auth != null ? service_config.enable_basic_auth : false) ? true : false } - } if(coalesce(var.services[service].enable_default_ingress, false) == true) + } if (coalesce(var.services[service].enable_default_ingress, false) == true) ]...) service_custom_domain_list = merge([ for service, config in var.services : tomap({ - for host in config.ingress_list : "${service}-${var.namespace}-${host}" => { - service_name = split(":", service)[0] - service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] - ingress_host = split("/", host)[0] - path_based_routing = (length(split("/", host)) > 1 - ? 
join("/", slice(split("/", host), 1, length(split("/", host)))) - : "") - ns = var.namespace - ingress_name = "${split(":", service)[0]}-${(replace(host, "/", "-"))}-ingress" - basic_auth = (config.enable_basic_auth != null ? config.enable_basic_auth : false) ? true : false - nginx_rewrite = config.nginx_rewrite != null ? config.nginx_rewrite : true + for host in config.ingress_list : "${service}-${var.namespace}-${host}" => { + service_name = split(":", service)[0] + service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] + ingress_host = split("/", host)[0] + path_based_routing = ( length(split("/", host)) > 1 + ? join("/", slice(split("/", host) , 1, length(split("/", host)))) + : "") + ns = var.namespace + ingress_name = "${split(":", service)[0]}-${(replace(host, "/", "-"))}-ingress" + basic_auth = (config.enable_basic_auth != null ? config.enable_basic_auth : false) ? true : false + nginx_rewrite = config.nginx_rewrite != null ? config.nginx_rewrite : true } # Exclude wildcard hosts from custom host logic if !can(regex("^\\*\\.", split("/", host)[0])) - }) if try(length(var.services[service].ingress_list), 0) != 0 + })if try(length(var.services[service].ingress_list),0) != 0 ]...) default_services_list = merge([ for service in keys(local.default_domain_list) : { - for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { + for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen domain_name = ingress_name ns = var.namespace ingress_name = "${split(":", service)[0]}-${ingress_name}-ingress" - basic_auth = local.default_domain_list[service].basic_auth + basic_auth = local.default_domain_list[service].basic_auth } } ]...) @@ -47,44 +47,44 @@ locals { wildcard_custom_hosts = merge([ for service, config in var.services : tomap({ for host in try(config.ingress_list, []) : - "${service}-${var.namespace}-${host}" => { - service_name = split(":", service)[0] - service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] - ingress_host = split("/", host)[0] - ns = var.namespace - ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) - base_domain = replace(split("/", host)[0], "*.", "") - } - if can(regex("^\\*\\.", split("/", host)[0])) + "${service}-${var.namespace}-${host}" => { + service_name = split(":", service)[0] + service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] + ingress_host = split("/", host)[0] + ns = var.namespace + ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) + base_domain = replace(split("/", host)[0], "*.", "") + } + if can(regex("^\\*\\.", split("/", host)[0])) }) if try(length(config.ingress_list), 0) != 0 ]...) } resource "random_password" "basic_auth_password" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? 
v.enable_basic_auth : false } length = 32 special = true override_special = "_@" } resource "random_string" "basic_auth_user_name_suffix" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - length = 6 - special = true - upper = false - numeric = false + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + length = 6 + special = true + upper = false + numeric = false min_special = 2 - lower = true + lower = true } resource "google_secret_manager_secret" "basic_auth_credentials" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - provider = google-beta - project = var.provider_id - secret_id = "${local.cluster_name}-${var.namespace}-${each.key}-basic-auth-credentials" - labels = local.common_tags + provider = google-beta + project = var.provider_id + secret_id = "${local.cluster_name}-${var.namespace}-${each.key}-basic-auth-credentials" + labels = local.common_tags replication { automatic = true @@ -92,28 +92,28 @@ resource "google_secret_manager_secret" "basic_auth_credentials" { } resource "google_secret_manager_secret_iam_member" "basic_auth_credentials" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - project = var.provider_id - secret_id = google_secret_manager_secret.basic_auth_credentials[each.key].secret_id - role = "roles/secretmanager.secretAccessor" - member = "serviceAccount:${data.google_project.this.number}-compute@developer.gserviceaccount.com" + project = var.provider_id + secret_id = google_secret_manager_secret.basic_auth_credentials[each.key].secret_id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.this.number}-compute@developer.gserviceaccount.com" } resource "google_secret_manager_secret_version" "basic_auth_credentials" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - secret = google_secret_manager_secret.basic_auth_credentials[each.key].id - secret_data = jsonencode({ user_name = "${each.key}-${random_string.basic_auth_user_name_suffix[each.key].result}", - password = random_password.basic_auth_password[each.key].result }) - depends_on = [google_secret_manager_secret.basic_auth_credentials] + secret = google_secret_manager_secret.basic_auth_credentials[each.key].id + secret_data = jsonencode({ user_name = "${each.key}-${random_string.basic_auth_user_name_suffix[each.key].result}", + password = random_password.basic_auth_password[each.key].result }) + depends_on = [google_secret_manager_secret.basic_auth_credentials] } resource "kubernetes_secret_v1" "basic_auth_secret" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? 
v.enable_basic_auth : false} metadata { - name = "${each.key}-basic-auth" + name = "${each.key}-basic-auth" namespace = var.namespace } data = { @@ -123,15 +123,15 @@ resource "kubernetes_secret_v1" "basic_auth_secret" { } resource "kubernetes_ingress_v1" "default_service_ingress" { - for_each = { for service, value in local.default_services_list : service => value } + for_each = {for service, value in local.default_services_list : service => value } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -160,17 +160,17 @@ resource "kubernetes_ingress_v1" "default_service_ingress" { } resource "kubernetes_ingress_v1" "custom_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "cert-manager.io/issuer" = "letsencrypt" - "kubernetes.io/tls-acme" = "true" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "cert-manager.io/issuer" = "letsencrypt" + "kubernetes.io/tls-acme" = "true" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } @@ -192,15 +192,15 @@ resource "kubernetes_ingress_v1" "custom_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] } resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } metadata { name = each.value.ingress_name namespace = each.value.ns @@ -209,10 +209,10 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { "cert-manager.io/issuer" = "letsencrypt" "kubernetes.io/tls-acme" = "true" "nginx.ingress.kubernetes.io/use-regex" = "true" - "nginx.ingress.kubernetes.io/rewrite-target" = each.value.nginx_rewrite ? "/$2" : "" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" - "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? 
"${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/rewrite-target" = each.value.nginx_rewrite ? "/$2" : "" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } @@ -234,8 +234,8 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] @@ -289,11 +289,11 @@ locals { ingress_tls_secrets = merge([ for service_name, service in var.services : { for idx, ingress in try(service.ingress_with_secret, []) : - "${service_name}-${idx}" => { - host = ingress.host - tls_crt_key = ingress.cloud_secret.tls_crt_key - tls_key_key = ingress.cloud_secret.tls_key_key - } + "${service_name}-${idx}" => { + host = ingress.host + tls_crt_key = ingress.cloud_secret.tls_crt_key + tls_key_key = ingress.cloud_secret.tls_key_key + } } ]...) } @@ -327,12 +327,12 @@ resource "kubernetes_ingress_v1" "service_ingress_with_secret" { for_each = merge([ for service_name, service in var.services : { for idx, ingress in try(service.ingress_with_secret, []) : - "${service_name}-${idx}" => { - service_name = service_name - host = ingress.host - cloud_secret = ingress.cloud_secret - service_port = try(ingress.service_port, 80) - } + "${service_name}-${idx}" => { + service_name = service_name + host = ingress.host + cloud_secret = ingress.cloud_secret + service_port = try(ingress.service_port, 80) + } } ]...) diff --git a/k8s/gcp/namespace/sql.tf b/k8s/gcp/namespace/sql.tf index 9100cf31..ef74a983 100644 --- a/k8s/gcp/namespace/sql.tf +++ b/k8s/gcp/namespace/sql.tf @@ -1,32 +1,34 @@ locals { cluster_name = var.app_env == "" ? var.app_name : "${var.app_name}-${var.app_env}" + cluster_name_parts = split("-", local.cluster_name) + environment = var.app_env - secondary_ip = [for subnet in data.google_compute_subnetwork.app_subnet.secondary_ip_range : subnet.ip_cidr_range] - ext_rds_sg_cidr_block = concat([data.google_compute_subnetwork.app_subnet.ip_cidr_range], local.secondary_ip, var.ext_rds_sg_cidr_block) + secondary_ip = [for subnet in data.google_compute_subnetwork.app_subnet.secondary_ip_range : subnet.ip_cidr_range] + ext_rds_sg_cidr_block = concat([data.google_compute_subnetwork.app_subnet.ip_cidr_range], local.secondary_ip , var.ext_rds_sg_cidr_block) enable_db = try(var.sql_db.enable, false) - db_list = distinct(concat(distinct([for key, value in var.services : value.db_name]), distinct([for key, value in var.cron_jobs : value.db_name]))) + db_list = distinct(concat(distinct([for key, value in var.services: value.db_name]), distinct([for key, value in var.cron_jobs: value.db_name]))) - common_tags = merge(var.common_tags, + common_tags = merge(var.common_tags, tomap({ - project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name, local.cluster_name) + project = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name ,local.cluster_name) provisioner = try(var.standard_tags.provisioner != null ? 
var.standard_tags.provisioner : "zop-dev", "zop-dev") - })) + })) grouped_database_map = merge( { for service_key, service_value in var.services : service_value.datastore_configs.name => [ - service_value.datastore_configs.databse - ]... + service_value.datastore_configs.databse + ]... if try(service_value.datastore_configs.name, null) != null && try(service_value.datastore_configs.databse, null) != null }, { for cron_key, cron_value in var.cron_jobs : cron_value.datastore_configs.name => [ - cron_value.datastore_configs.databse - ]... + cron_value.datastore_configs.databse + ]... if try(cron_value.datastore_configs.name, null) != null && try(cron_value.datastore_configs.databse, null) != null } @@ -44,7 +46,7 @@ data "google_project" "this" {} data "google_client_config" "default" {} data "google_compute_network" "vpc" { - name = var.vpc + name = var.vpc } data "google_compute_subnetwork" "app_subnet" { @@ -53,8 +55,8 @@ data "google_compute_subnetwork" "app_subnet" { } module "sql_db" { - source = "../../../sql/gcp-sql" - count = local.enable_db == true ? 1 : 0 + source = "../../../sql/gcp-sql" + count = local.enable_db == true ? 1 : 0 project_id = var.provider_id project_number = data.google_project.this.number @@ -83,7 +85,7 @@ module "sql_db" { resource "kubernetes_service" "db_service" { - count = var.sql_db == null ? 0 : 1 + count = var.sql_db == null ? 0 : 1 metadata { name = "${var.namespace}-sql" namespace = "db" @@ -98,7 +100,7 @@ resource "kubernetes_service" "db_service" { } module "sql_db_v2" { - source = "../../../sql/gcp-sql" + source = "../../../sql/gcp-sql" for_each = var.sql_list != null ? var.sql_list : {} @@ -131,7 +133,7 @@ module "sql_db_v2" { resource "kubernetes_service" "sql_db_service_v2" { for_each = var.sql_list != null ? 
var.sql_list : {} - + metadata { name = "${each.key}-sql" namespace = "db" diff --git a/k8s/gcp/namespace/vars.tf b/k8s/gcp/namespace/vars.tf index 78fdfbe7..49aa5e13 100644 --- a/k8s/gcp/namespace/vars.tf +++ b/k8s/gcp/namespace/vars.tf @@ -32,6 +32,12 @@ variable "subnet" { default = "" } +variable "cluster_key" { + description = "Path for terraform state file of cluster" + type = string + default = "" +} + variable "namespace" { description = "Namespace of the Services to be deployed" type = string @@ -40,86 +46,86 @@ variable "namespace" { variable "services" { description = "Map of services to be deployed within the namespace" - type = map(object({ - repo_name = optional(string) - gar_name = optional(string) - gar_project = optional(string) - nginx_rewrite = optional(bool) - db_name = optional(string) - pub_sub = optional(bool) - topics = optional(list(string)) - subscriptions = optional(list(string)) - redis = optional(bool) - local_redis = optional(bool) - service_account = optional(string) - custom_secrets = optional(list(string)) - ingress_list = optional(list(string)) + type = map(object({ + repo_name = optional(string) + gar_name = optional(string) + gar_project = optional(string) + nginx_rewrite = optional(bool) + db_name = optional(string) + pub_sub = optional(bool) + topics = optional(list(string)) + subscriptions = optional(list(string)) + redis = optional(bool) + local_redis = optional(bool) + service_account= optional(string) + custom_secrets = optional(list(string)) + ingress_list = optional(list(string)) enable_basic_auth = optional(bool) enable_default_ingress = optional(bool) - badger_db = optional(bool) + badger_db = optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) databse = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) image = optional(string) - replica_count = optional(number) - cli_service = optional(bool) - http_port = optional(string) - metrics_port = optional(string) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - min_available = optional(number) - heartbeat_url = optional(string) - ports = optional(map(any)) - env = optional(map(any)) - env_list = optional(list(object({ + replica_count = optional(number) + cli_service = optional(bool) + http_port = optional(string) + metrics_port = optional(string) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + min_available = optional(number) + heartbeat_url = optional(string) + ports = optional(map(any)) + env = optional(map(any)) + env_list = optional(list(object({ name = string value = string }))) - command = optional(list(string)) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - hpa = optional(object({ - enable = optional(bool) - min_replicas = optional(number) - max_replicas = optional(number) - cpu_limit = optional(number) - memory_limit = optional(number) + command = optional(list(string)) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + hpa = optional(object({ + enable = optional(bool) + min_replicas = optional(number) + max_replicas = optional(number) + cpu_limit = optional(number) + memory_limit = optional(number) })) - readiness_probes = optional(object({ + 
readiness_probes = optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - liveness_probes = optional(object({ + liveness_probes = optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - pvc = optional(map(object({ + pvc = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -133,83 +139,83 @@ variable "services" { cpu_utilisation_threshold = optional(number) })) custom_alerts = optional(list(object({ - name = string - description = string - alertRule = string - sumByLabel = optional(string) - percentile = optional(number) - labelValue = optional(string) - queryOperator = optional(string) - timeWindow = optional(string) - threshold = number + name = string + description = string + alertRule = string + sumByLabel = optional(string) + percentile = optional(number) + labelValue = optional(string) + queryOperator = optional(string) + timeWindow = optional(string) + threshold = number custom_expression = optional(string) - labels = optional(map(string)) + labels = optional(map(string)) }))) })) ingress_with_secret = optional(list(object({ - host = string + host = string cloud_secret = object({ tls_crt_key = string tls_key_key = string }) })), []) })) - default = {} + default = {} } variable "cron_jobs" { description = "Map of cron jobs to be executed within the namespace" - type = map(object({ - repo_name = optional(string) - gar_name = optional(string) - gar_project = optional(string) - db_name = optional(string) - topics = optional(list(string)) - subscriptions = optional(list(string)) - pub_sub = optional(bool) - redis = optional(bool) - local_redis = optional(bool) - service_account = optional(string) - custom_secrets = optional(list(string)) - ingress_list = optional(list(string)) + type = map(object({ + repo_name = optional(string) + gar_name = optional(string) + gar_project = optional(string) + db_name = optional(string) + topics = optional(list(string)) + subscriptions = optional(list(string)) + pub_sub = optional(bool) + redis = optional(bool) + local_redis = optional(bool) + service_account= optional(string) + custom_secrets = optional(list(string)) + ingress_list = optional(list(string)) enable_basic_auth = optional(bool) enable_default_ingress = optional(bool) - badger_db = optional(bool) + badger_db = optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) databse = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) image = optional(string) schedule = string suspend = optional(bool) concurrency_policy = optional(string) - http_port = optional(string) - metrics_port = optional(string) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - 
max_memory = optional(string) - env = optional(map(any)) - env_list = optional(list(object({ + http_port = optional(string) + metrics_port = optional(string) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + env = optional(map(any)) + env_list = optional(list(object({ name = string value = string }))) - command = optional(list(string)) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + command = optional(list(string)) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -220,13 +226,13 @@ variable "cron_jobs" { })) })) })) - default = {} + default = {} } variable "user_access" { description = "List of users who will have access to clusters" type = object({ - admins = optional(list(string)) + admins = optional(list(string)) viewers = optional(list(string)) editors = optional(list(string)) }) @@ -245,19 +251,19 @@ variable "artifact_users" { variable "sql_db" { description = "Inputs to provision SQL instance" - type = object( + type = object( { - enable = optional(bool) - machine_type = optional(string) - disk_size = optional(number) - type = optional(string) - availability_type = optional(string) - deletion_protection = optional(bool) - read_replica = optional(bool) - activation_policy = optional(string) - db_collation = optional(string) - enable_ssl = optional(bool) - sql_version = optional(string) + enable = optional(bool) + machine_type = optional(string) + disk_size = optional(number) + type = optional(string) + availability_type = optional(string) + deletion_protection = optional(bool) + read_replica = optional(bool) + activation_policy = optional(string) + db_collation = optional(string) + enable_ssl = optional(bool) + sql_version = optional(string) } ) default = null @@ -265,7 +271,7 @@ variable "sql_db" { variable "local_redis" { description = "Inputs to provision Redis instance within the cluster as a statefulset." 
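# The sql_db and local_redis inputs above follow a pattern used throughout these
# modules: object-typed variables whose attributes are all optional(), with
# default = null so the feature stays off unless the caller opts in. A minimal
# standalone sketch of that pattern (variable name and default values here are
# illustrative, not taken from the module):
variable "example_sql_db" {
  type = object({
    enable       = optional(bool, false)
    machine_type = optional(string)
    disk_size    = optional(number, 20)
  })
  default = null
}
# A caller can then set only the fields it needs, e.g. in terraform.tfvars:
#   example_sql_db = { enable = true, disk_size = 50 }
# Any unset attribute falls back to its optional() default (or null).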
- type = object( + type = object( { enable = bool disk_size = optional(string) @@ -281,7 +287,7 @@ variable "local_redis" { variable "cassandra_db" { description = "Inputs to provision Cassandra instances" - type = object( + type = object( { admin_user = string replica_count = number @@ -315,7 +321,7 @@ variable "ext_rds_sg_cidr_block" { variable "standard_tags" { description = "standard tags for resources" - type = object({ + type = object ({ project = optional(string) provisioner = optional(string) }) @@ -367,16 +373,22 @@ variable "helm_charts" { variable "cluster_prefix" { description = "prefix for cluster terraform state file" + type = string + default = "" +} + +variable "provisioner" { + description = "Provisioner being used to setup Infra" type = string - default = "" + default = "zop-dev" } variable "pub_sub" { - type = bool + type = bool default = false } -variable "cert_issuer_config" { +variable "cert_issuer_config"{ description = "email to be added as cert-manager issuer" type = object({ env = optional(string) @@ -386,17 +398,17 @@ variable "cert_issuer_config" { variable "sql_list" { type = map(object({ - type = optional(string) - sql_version = optional(string) - machine_type = optional(string) - enable_ssl = optional(string) - availability_type = optional(string) - db_collation = optional(string) - activation_policy = optional(string) - deletion_protection = optional(string) - read_replica = optional(string) - disk_autoresize = optional(string) - disk_size = optional(string) + type = optional(string) + sql_version = optional(string) + machine_type = optional(string) + enable_ssl = optional(string) + availability_type = optional(string) + db_collation = optional(string) + activation_policy = optional(string) + deletion_protection = optional(string) + read_replica = optional(string) + disk_autoresize = optional(string) + disk_size = optional(string) })) default = null } \ No newline at end of file diff --git a/k8s/gcp/nginx/vars.tf b/k8s/gcp/nginx/vars.tf index 948c0228..01c069e9 100644 --- a/k8s/gcp/nginx/vars.tf +++ b/k8s/gcp/nginx/vars.tf @@ -1,12 +1,32 @@ +variable "project" { + description = "Project ID where the resources to be created" + type = string +} -variable "app_name" { - description = "AppDynamics Controller URL." +variable "app_region" { + description = "Load balancer nginx region" + type = string + default = "" +} + +variable "node_port" { + description = "Node Port on which to expose kong." + type = number +} + +variable "app_env" { + description = "This is Environment where the NLB is deployed." type = string } +variable "app_name" { + description = "AppDynamics Controller URL." 
+ type = string +} + variable "lb_ip" { description = "Global IP address to be added in LoadBalancer" - type = string + type = string } variable "prometheus_enabled" { diff --git a/k8s/oci/namespace/badger-db.tf b/k8s/oci/namespace/badger-db.tf index 90c98497..8b981b39 100644 --- a/k8s/oci/namespace/badger-db.tf +++ b/k8s/oci/namespace/badger-db.tf @@ -1,7 +1,7 @@ locals { badger_db_volume_mounts_services = tomap({ for k, v in var.services : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) @@ -9,7 +9,7 @@ locals { badger_db_volume_mounts_crons = tomap({ for k, v in var.cron_jobs : k => { - (k) = { + "${k}" = { mount_path = "/etc/data" } } if coalesce(v.badger_db, false) diff --git a/k8s/oci/namespace/main.tf b/k8s/oci/namespace/main.tf index e10556f4..f4cca7d3 100644 --- a/k8s/oci/namespace/main.tf +++ b/k8s/oci/namespace/main.tf @@ -1,5 +1,15 @@ locals { + service_oar_name_map = { + for key, config in var.services : key => coalesce(config.oar_name, key) + } + + cronjob_oar_name_map = { + for key, config in var.cron_jobs : key => coalesce(config.oar_name, key) + } + + oar_name_map = merge(local.service_oar_name_map, local.cronjob_oar_name_map) + artifact_users_map = { for user in data.oci_identity_users.all_users.users : user.email => user.id @@ -11,11 +21,11 @@ locals { if lookup(local.artifact_users_map, email, null) != null ] - common_tags = merge(var.common_tags, + common_tags = merge(var.common_tags, tomap({ - "zop.project" = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name, local.cluster_name) + "zop.project" = try(var.standard_tags.project != null ? var.standard_tags.project : local.cluster_name ,local.cluster_name) "zop.provisioner" = try(var.standard_tags.provisioner != null ? var.standard_tags.provisioner : "zop-dev", "zop-dev") - })) + })) } data "oci_identity_users" "all_users" { @@ -36,7 +46,7 @@ resource "kubernetes_namespace" "app_environments" { } resource "oci_identity_dynamic_group" "artifact_users_group" { - count = length(local.artifact_users) > 0 ? 1 : 0 + count = length(local.artifact_users) > 0 ? 1 : 0 name = "artifact-users-group" description = "Dynamic group for artifact registry users" @@ -53,11 +63,11 @@ data "oci_identity_compartment" "current" { } resource "oci_identity_policy" "artifact_access_policy" { - count = length(local.artifact_users) > 0 ? 1 : 0 + count = length(local.artifact_users) > 0 ? 1 : 0 name = "artifact-access-policy" description = "Allows access to artifact registries for dynamic group" compartment_id = data.oci_identity_compartment.current.id - + statements = ["Allow dynamic-group ${oci_identity_dynamic_group.artifact_users_group[0].name} to manage repos in compartment id ${data.oci_identity_compartment.current.id}"] } \ No newline at end of file diff --git a/k8s/oci/namespace/nginx.tf b/k8s/oci/namespace/nginx.tf index 30d5d7ed..88efb024 100644 --- a/k8s/oci/namespace/nginx.tf +++ b/k8s/oci/namespace/nginx.tf @@ -3,11 +3,11 @@ locals { default_domain_list = merge([ for service, service_config in var.services : { - (service) = { - ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] + "${ service }" = { + ingress = ["${split(":", service)[0]}-${var.namespace}.${local.domain_name}"] basic_auth = (service_config.enable_basic_auth != null ? service_config.enable_basic_auth : false) ? 
true : false } - } if(coalesce(var.services[service].enable_default_ingress, false) == true) + } if (coalesce(var.services[service].enable_default_ingress, false) == true) ]...) service_custom_domain_list = merge([ @@ -23,12 +23,12 @@ locals { } # Exclude wildcard hosts from custom host logic if !can(regex("^\\*\\.", split("/", host)[0])) - }) if try(length(var.services[service].ingress_list), 0) != 0 + })if try(length(var.services[service].ingress_list),0) != 0 ]...) default_services_list = merge([ for service in keys(local.default_domain_list) : { - for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { + for ingress_name in local.default_domain_list[service].ingress : "${service}-${var.namespace}-${ingress_name}" => { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen @@ -46,57 +46,57 @@ locals { wildcard_custom_hosts = merge([ for service, config in var.services : tomap({ for host in try(config.ingress_list, []) : - "${service}-${var.namespace}-${host}" => { - service_name = split(":", service)[0] - service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] - ingress_host = split("/", host)[0] - ns = var.namespace - ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) - base_domain = replace(split("/", host)[0], "*.", "") - } - if can(regex("^\\*\\.", split("/", host)[0])) + "${service}-${var.namespace}-${host}" => { + service_name = split(":", service)[0] + service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] + ingress_host = split("/", host)[0] + ns = var.namespace + ingress_name = lower(replace("${split(":", service)[0]}-${replace(host, "/", "-")}-wildcard-ingress", "*", "wildcard")) + base_domain = replace(split("/", host)[0], "*.", "") + } + if can(regex("^\\*\\.", split("/", host)[0])) }) if try(length(config.ingress_list), 0) != 0 ]...) } resource "random_password" "basic_auth_password" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } length = 32 special = true override_special = "_@" } resource "random_string" "basic_auth_user_name_suffix" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - length = 6 - special = true - upper = false - numeric = false + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + length = 6 + special = true + upper = false + numeric = false min_special = 2 - lower = true + lower = true } resource "oci_vault_secret" "basic_auth_credentials" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } - - secret_name = "${local.cluster_name}-${var.namespace}-${each.key}-basic-auth-credentials" - compartment_id = var.provider_id + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? 
v.enable_basic_auth : false } + + secret_name = "${local.cluster_name}-${var.namespace}-${each.key}-basic-auth-credentials" + compartment_id = var.provider_id - vault_id = local.kms_vault_id - key_id = local.kms_key_id + vault_id = local.kms_vault_id + key_id = local.kms_key_id secret_content { content_type = "DEFAULT" - content = jsonencode({ user_name = "${each.key}-${random_string.basic_auth_user_name_suffix[each.key].result}", - password = random_password.basic_auth_password[each.key].result }) + content = jsonencode({ user_name = "${each.key}-${random_string.basic_auth_user_name_suffix[each.key].result}", + password = random_password.basic_auth_password[each.key].result }) } } resource "kubernetes_secret_v1" "basic_auth_secret" { - for_each = { for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false } + for_each = {for k, v in var.services : k => v if v.enable_basic_auth != null ? v.enable_basic_auth : false} metadata { - name = "${each.key}-basic-auth" + name = "${each.key}-basic-auth" namespace = var.namespace } data = { @@ -106,15 +106,15 @@ resource "kubernetes_secret_v1" "basic_auth_secret" { } resource "kubernetes_ingress_v1" "default_service_ingress" { - for_each = { for service, value in local.default_services_list : service => value } + for_each = {for service, value in local.default_services_list : service => value } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -143,17 +143,17 @@ resource "kubernetes_ingress_v1" "default_service_ingress" { } resource "kubernetes_ingress_v1" "custom_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing == "" } metadata { name = each.value.ingress_name namespace = each.value.ns annotations = { - "kubernetes.io/ingress.class" = "nginx" - "cert-manager.io/issuer" = "letsencrypt" - "kubernetes.io/tls-acme" = "true" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "kubernetes.io/ingress.class" = "nginx" + "cert-manager.io/issuer" = "letsencrypt" + "kubernetes.io/tls-acme" = "true" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? 
"Authentication Required" : "" } } spec { @@ -174,15 +174,15 @@ resource "kubernetes_ingress_v1" "custom_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] } resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { - for_each = { for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } + for_each = {for service, value in local.service_custom_domain_list : service => value if value.path_based_routing != "" } metadata { name = each.value.ingress_name namespace = each.value.ns @@ -192,9 +192,9 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { "kubernetes.io/tls-acme" = "true" "nginx.ingress.kubernetes.io/use-regex" = "true" "nginx.ingress.kubernetes.io/rewrite-target" = "/$2" - "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" - "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" - "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" + "nginx.ingress.kubernetes.io/auth-type" = each.value.basic_auth ? "basic" : "" + "nginx.ingress.kubernetes.io/auth-secret" = each.value.basic_auth ? "${each.value.service_name}-basic-auth" : "" + "nginx.ingress.kubernetes.io/auth-realm" = each.value.basic_auth ? "Authentication Required" : "" } } spec { @@ -215,8 +215,8 @@ resource "kubernetes_ingress_v1" "custom_path_based_service_ingress" { } } tls { - secret_name = "tls-secret-${each.value.ingress_host}" - hosts = [each.value.ingress_host] + secret_name ="tls-secret-${each.value.ingress_host}" + hosts =[each.value.ingress_host] } } depends_on = [kubernetes_namespace.app_environments] diff --git a/k8s/oci/namespace/secrets.tf b/k8s/oci/namespace/secrets.tf index fc1fc84a..d518568c 100644 --- a/k8s/oci/namespace/secrets.tf +++ b/k8s/oci/namespace/secrets.tf @@ -1,5 +1,21 @@ locals { + custom_secrets = merge([ + for k in keys(var.services) : tomap({ + for secret in var.services[k].custom_secrets : "${k}-${secret}" => { + secret_name = secret + service = k + } + }) if var.services[k].custom_secrets != null + ]...) + cron_job_custom_secrets = merge([ + for k in keys(var.cron_jobs) : tomap({ + for secret in var.cron_jobs[k].custom_secrets : "${k}-${secret}" => { + secret_name = secret + cron_job = k + } + }) if var.cron_jobs[k].custom_secrets != null + ]...) } resource "kubernetes_service_account" "secrets" { @@ -13,16 +29,16 @@ resource "kubernetes_service_account" "secrets" { } resource "kubectl_manifest" "secrets_provider" { - for_each = { for k, v in var.services : k => v } - + for_each = { for k,v in var.services : k => v } + yaml_body = templatefile("${path.module}/templates/secret-provider-class.yaml", { secrets = jsonencode(concat( - (each.value.datastore_configs != null ? [{ - key = "DB_PASSWORD", - value = "${each.value.datastore_configs.name}-${var.namespace}-${replace(each.value.datastore_configs.database, "_", "-")}-${each.value.datastore_configs.type == "mysql" ? "mysql-db" : "postgres-db"}" + (each.value.datastore_configs != null ? [{ + key = "DB_PASSWORD", + value = "${each.value.datastore_configs.name}-${var.namespace}-${replace(each.value.datastore_configs.database,"_","-")}-${each.value.datastore_configs.type == "mysql" ? 
"mysql-db" : "postgres-db"}" }] : []), - try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret" }], []), + try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret"}], []), )) namespace = kubernetes_namespace.app_environments.metadata[0].name service_name = each.key @@ -32,16 +48,16 @@ resource "kubectl_manifest" "secrets_provider" { } resource "kubectl_manifest" "secrets_provider_cron_jobs" { - for_each = { for k, v in var.cron_jobs : k => v } + for_each = { for k,v in var.cron_jobs : k => v } yaml_body = templatefile("${path.module}/templates/secret-provider-class.yaml", { secrets = jsonencode(concat( - (each.value.datastore_configs != null ? [{ - key = "DB_PASSWORD", - value = "${each.value.datastore_configs.name}-${var.namespace}-${replace(each.value.datastore_configs.database, "_", "-")}-${each.value.datastore_configs.type == "mysql" ? "mysql-db" : "postgres-db"}" + (each.value.datastore_configs != null ? [{ + key = "DB_PASSWORD", + value = "${each.value.datastore_configs.name}-${var.namespace}-${replace(each.value.datastore_configs.database,"_","-")}-${each.value.datastore_configs.type == "mysql" ? "mysql-db" : "postgres-db"}" }] : []), - try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret" }], []), + try([for secret in each.value.custom_secrets : { key = secret, value = "${local.cluster_name}-${var.namespace}-${each.key}-${secret}-secret"}], []), )) namespace = kubernetes_namespace.app_environments.metadata[0].name service_name = each.key diff --git a/k8s/oci/namespace/vars.tf b/k8s/oci/namespace/vars.tf index 28af8d17..163ed58a 100644 --- a/k8s/oci/namespace/vars.tf +++ b/k8s/oci/namespace/vars.tf @@ -1,12 +1,18 @@ variable "app_name" { - description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." - type = string + description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." + type = string } variable "app_region" { - type = string + type = string description = "Location where the resources to be created" - default = "" + default = "" +} + +variable "app_env" { + description = "Application deployment environment." 
+ type = string + default = "" } variable "namespace" { @@ -24,14 +30,14 @@ variable "accessibility" { variable "shared_services" { type = object({ - type = string - bucket = string - profile = optional(string) - location = optional(string) - resource_group = optional(string) + type = string + bucket = string + profile = optional(string) + location = optional(string) + resource_group = optional(string) storage_account = optional(string) - container = optional(string) - cluster_prefix = optional(string) + container = optional(string) + cluster_prefix = optional(string) }) } @@ -43,7 +49,7 @@ variable "common_tags" { variable "standard_tags" { description = "standard tags for resources" - type = object({ + type = object ({ project = optional(string) provisioner = optional(string) }) @@ -59,12 +65,12 @@ variable "provider_id" { variable "user_access" { description = "List of users who will have access to clusters" type = object({ - admins = optional(list(string)) + admins = optional(list(string)) viewers = optional(list(string)) editors = optional(list(string)) }) default = { - admins = [] + admins = [] viewers = [] editors = [] } @@ -83,7 +89,7 @@ variable "helm_charts" { default = {} } -variable "cert_issuer_config" { +variable "cert_issuer_config"{ description = "email to be added as cert-manager issuer" type = object({ env = optional(string) @@ -99,84 +105,84 @@ variable "artifact_users" { variable "services" { description = "Map of services to be deployed within the namespace" - type = map(object({ - repo_name = optional(string) - oar_name = optional(string) - region = optional(string) - account_id = optional(string) - db_name = optional(string) - redis = optional(bool) - local_redis = optional(bool) - custom_secrets = optional(list(string)) + type = map(object({ + repo_name = optional(string) + oar_name = optional(string) + region = optional(string) + account_id = optional(string) + db_name = optional(string) + redis = optional(bool) + local_redis = optional(bool) + custom_secrets = optional(list(string)) enable_default_ingress = optional(bool) enable_basic_auth = optional(bool) - service_deployer = string - ingress_list = optional(list(string)) - badger_db = optional(bool) + service_deployer = string + ingress_list = optional(list(string)) + badger_db = optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) database = optional(string) - type = optional(string) + type = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) - replica_count = optional(number) + replica_count = optional(number) image = optional(string) - cli_service = optional(bool) - http_port = optional(string) - metrics_port = optional(string) - ports = optional(map(any)) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - min_available = optional(number) - heartbeat_url = optional(string) - env = optional(map(any)) - env_list = optional(list(object({ + cli_service = optional(bool) + http_port = optional(string) + metrics_port = optional(string) + ports = optional(map(any)) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + min_available = optional(number) + heartbeat_url = optional(string) + env = optional(map(any)) + env_list = optional(list(object({ name = 
string value = string }))) - command = optional(list(string)) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - hpa = optional(object({ - enable = optional(bool) - min_replicas = optional(number) - max_replicas = optional(number) - cpu_limit = optional(string) - memory_limit = optional(string) + command = optional(list(string)) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + hpa = optional(object({ + enable = optional(bool) + min_replicas = optional(number) + max_replicas = optional(number) + cpu_limit = optional(string) + memory_limit = optional(string) })) - readiness_probes = optional(object({ + readiness_probes = optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - liveness_probes = optional(object({ + liveness_probes = optional(object({ enable = optional(bool) initial_delay_seconds = optional(number) period_seconds = optional(number) timeout_seconds = optional(number) failure_threshold = optional(number) })) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - pvc = optional(map(object({ + pvc = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -191,63 +197,63 @@ variable "services" { })) })) })) - default = {} + default = {} } variable "cron_jobs" { description = "Map of cron jobs to be executed within the namespace" - type = map(object({ - repo_name = optional(string) - oar_name = optional(string) - gar_project = optional(string) - db_name = optional(string) - topics = optional(list(string)) - subscriptions = optional(list(string)) - pub_sub = optional(bool) - redis = optional(bool) - local_redis = optional(bool) - service_account = optional(string) - custom_secrets = optional(list(string)) - ingress_list = optional(list(string)) + type = map(object({ + repo_name = optional(string) + oar_name = optional(string) + gar_project = optional(string) + db_name = optional(string) + topics = optional(list(string)) + subscriptions = optional(list(string)) + pub_sub = optional(bool) + redis = optional(bool) + local_redis = optional(bool) + service_account= optional(string) + custom_secrets = optional(list(string)) + ingress_list = optional(list(string)) enable_basic_auth = optional(bool) enable_default_ingress = optional(bool) - badger_db = optional(bool) + badger_db = optional(bool) datastore_configs = optional(object({ - name = optional(string) + name = optional(string) database = optional(string) - type = optional(string) + type = optional(string) })) redis_configs = optional(object({ name = optional(string) port = optional(number) })) - helm_configs = optional(object({ + helm_configs = optional(object({ image_pull_secrets = optional(list(string)) image = optional(string) schedule = string suspend = optional(bool) concurrency_policy = optional(string) - http_port = optional(string) - metrics_port = optional(string) - min_cpu = optional(string) - min_memory = optional(string) - max_cpu = optional(string) - max_memory = optional(string) - env = optional(map(any)) - env_list = optional(list(object({ + http_port = 
optional(string) + metrics_port = optional(string) + min_cpu = optional(string) + min_memory = optional(string) + max_cpu = optional(string) + max_memory = optional(string) + env = optional(map(any)) + env_list = optional(list(object({ name = string value = string }))) - command = optional(list(string)) - configmaps_list = optional(list(string)) - secrets_list = optional(list(string)) - volume_mounts = optional(object({ - configmaps = optional(map(object({ + command = optional(list(string)) + configmaps_list = optional(list(string)) + secrets_list = optional(list(string)) + volume_mounts = optional(object({ + configmaps = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) }))) - secrets = optional(map(object({ + secrets = optional(map(object({ mount_path = string sub_path = optional(string) read_only = optional(bool) @@ -258,23 +264,23 @@ variable "cron_jobs" { })) })) })) - default = {} + default = {} } variable "sql_list" { type = map(object({ - type = optional(string) - admin_user = optional(string) - storage = optional(number) - storage_scaling = optional(bool) - storage_tier = optional(string) - read_replica = optional(bool) - enable_ssl = optional(bool) - deletion_protection = optional(bool) - backup_retention_days = optional(number) - psql_version = optional(number) - iops = optional(number) - system_type = optional(string) + type = optional(string) + admin_user = optional(string) + storage = optional(number) + storage_scaling = optional(bool) + storage_tier = optional(string) + read_replica = optional(bool) + enable_ssl = optional(bool) + deletion_protection = optional(bool) + backup_retention_days = optional(number) + psql_version = optional(number) + iops = optional(number) + system_type = optional(string) })) default = null } diff --git a/k8s/oci/nginx/vars.tf b/k8s/oci/nginx/vars.tf index 2ddcaa54..b3516b73 100644 --- a/k8s/oci/nginx/vars.tf +++ b/k8s/oci/nginx/vars.tf @@ -1,3 +1,7 @@ +variable "oci_compartment_id" { + description = "OCI Compartment ID where resources will be created" + type = string +} variable "load_balancer_shape" { description = "The shape of the load balancer" @@ -17,7 +21,7 @@ variable "app_name" { variable "lb_ip" { description = "Global IP address to be added in LoadBalancer" - type = string + type = string } variable "prometheus_enabled" { diff --git a/k8s/oci/oke/fluentbit.tf b/k8s/oci/oke/fluentbit.tf index 55750656..671b5bec 100644 --- a/k8s/oci/oke/fluentbit.tf +++ b/k8s/oci/oke/fluentbit.tf @@ -1,31 +1,31 @@ locals { - fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? var.fluent_bit.enable : false) : false - fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? var.fluent_bit.loki : []) : [] - fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] - fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []) : [] - fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []) : [] - fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []) : [] - fluent_bit_slack = local.fluent_bit_enable ? (var.fluent_bit.slack != null ? var.fluent_bit.slack : []) : [] + fluent_bit_enable = var.fluent_bit != null ? (var.fluent_bit.enable != null ? var.fluent_bit.enable : false): false + fluent_bit_loki = local.fluent_bit_enable ? (var.fluent_bit.loki != null ? 
var.fluent_bit.loki : []) : [] + fluent_bit_http = local.fluent_bit_enable ? (var.fluent_bit.http != null ? var.fluent_bit.http : []) : [] + fluent_bit_splunk = local.fluent_bit_enable ? (var.fluent_bit.splunk != null ? var.fluent_bit.splunk : []): [] + fluent_bit_datadog = local.fluent_bit_enable ? (var.fluent_bit.datadog != null ? var.fluent_bit.datadog : []): [] + fluent_bit_newrelic = local.fluent_bit_enable ? (var.fluent_bit.new_relic != null ? var.fluent_bit.new_relic : []): [] + fluent_bit_slack = local.fluent_bit_enable ? (var.fluent_bit.slack != null ? var.fluent_bit.slack : []): [] fluent_bit_loki_outputs = concat([ - for k, v in local.fluent_bit_loki : { + for k,v in local.fluent_bit_loki : { host = v.host tenant_id = v.tenant_id != null ? v.tenant_id : "" labels = v.labels port = v.port != null ? v.port : 3100 - tls = v.tls != null ? v.tls : "On" + tls = v.tls != null ? v.tls : "On" } if length(local.fluent_bit_loki) > 0 - ], local.enable_loki ? [{ - host = "loki-distributor.loki" - tenant_id = random_uuid.grafana_standard_datasource_header_value.result - labels = "namespace=$kubernetes['namespace_name'],pod=$kubernetes['pod_name'],service=$kubernetes['container_name'],cluster=${local.cluster_name}" - port = 3100 - tls = "Off" - }] : []) + ], local.enable_loki ? [{ + host = "loki-distributor.loki" + tenant_id = random_uuid.grafana_standard_datasource_header_value.result + labels = "namespace=$kubernetes['namespace_name'],pod=$kubernetes['pod_name'],service=$kubernetes['container_name'],cluster=${local.cluster_name}" + port = 3100 + tls = "Off" + }]: []) fluent_bit_http_outputs = [ - for k, v in local.fluent_bit_http : { + for k,v in local.fluent_bit_http : { host = v.host port = v.port != null ? v.port : 80 uri = v.uri != null ? v.uri : "/" @@ -36,7 +36,7 @@ locals { ] fluent_bit_splunk_outputs = [ - for k, v in local.fluent_bit_splunk : { + for k,v in local.fluent_bit_splunk : { host = v.host token = v.token port = v.port != null ? v.port : 8088 @@ -46,36 +46,36 @@ locals { ] fluent_bit_datadog_outputs = [ - for k, v in local.fluent_bit_datadog : { - host = v.host - api_key = v.api_key - tls = v.tls != null ? v.tls : "On" - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_datadog : { + host = v.host + api_key = v.api_key + tls = v.tls != null ? v.tls : "On" + compress = v.compress != null ? v.compress : "gzip" } if length(local.fluent_bit_datadog) > 0 ] fluent_bit_newrelic_outputs = [ - for k, v in local.fluent_bit_newrelic : { - host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" - api_key = v.api_key - compress = v.compress != null ? v.compress : "gzip" + for k,v in local.fluent_bit_newrelic : { + host = v.host != null ? v.host : "https://log-api.eu.newrelic.com/log/v1" + api_key = v.api_key + compress = v.compress != null ? v.compress : "gzip" } if length(local.fluent_bit_newrelic) > 0 ] fluent_bit_slack_outputs = [ - for k, v in local.fluent_bit_slack : { - webhook = v.webhook + for k,v in local.fluent_bit_slack : { + webhook = v.webhook } if length(local.fluent_bit_slack) > 0 ] } -data template_file "fluent-bit" { +data template_file "fluent-bit"{ count = local.fluent_bit_enable ? 
1 : 0 template = file("./templates/fluent-bit-values.yaml") - vars = { - "CLUSTER_NAME" = local.cluster_name - "TAGS" = join(",", [for key, value in local.common_tags : "${key}=${value}"]) + vars = { + "CLUSTER_NAME" = local.cluster_name + "TAGS" = join(",", [for key, value in local.common_tags : "${key}=${value}"]) "HTTP_SERVER" = "On" "HTTP_PORT" = "2020" @@ -83,22 +83,22 @@ data template_file "fluent-bit" { "READ_FROM_HEAD" = "Off" "READ_FROM_TAIL" = "On" - fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) - fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) - fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) - fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) + fluent_bit_loki_outputs = jsonencode(local.fluent_bit_loki_outputs) + fluent_bit_http_outputs = jsonencode(local.fluent_bit_http_outputs) + fluent_bit_splunk_outputs = jsonencode(local.fluent_bit_splunk_outputs) + fluent_bit_datadog_outputs = jsonencode(local.fluent_bit_datadog_outputs) fluent_bit_newrelic_outputs = jsonencode(local.fluent_bit_newrelic_outputs) - fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) + fluent_bit_slack_outputs = jsonencode(local.fluent_bit_slack_outputs) } } resource "helm_release" "fluentbit-config" { - count = local.fluent_bit_enable ? 1 : 0 + count = local.fluent_bit_enable ? 1 : 0 repository = "https://fluent.github.io/helm-charts" chart = "fluent-bit" name = "fluent-bit" version = "0.35.0" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name values = [ data.template_file.fluent-bit[0].rendered diff --git a/k8s/oci/oke/grafana-dashboard.tf b/k8s/oci/oke/grafana-dashboard.tf index 81bec390..faa04f78 100644 --- a/k8s/oci/oke/grafana-dashboard.tf +++ b/k8s/oci/oke/grafana-dashboard.tf @@ -3,13 +3,13 @@ locals { folder_creation = false grafana_dashboard_folder = local.folder_creation ? { - Kong = ["kong-official"] - Partner_Standard_API = ["partner-standard-api"] - Disk_Utilization = ["cortex-disk-utilization", "prometheus-disk-utilization"] + Kong = ["kong-official"] + Partner_Standard_API = ["partner-standard-api"] + Disk_Utilization = ["cortex-disk-utilization", "prometheus-disk-utilization"] } : {} folder_map = [ - for key, value in local.grafana_dashboard_folder : { + for key, value in local.grafana_dashboard_folder : { folder = key dashboards = value } @@ -17,13 +17,13 @@ locals { dashboard_map = merge([ for key, value in local.folder_map : { - for dashboard in value.dashboards : "${value.folder}-${dashboard}" => { + for dashboard in value.dashboards : "${value.folder}-${dashboard}" => { folder = value.folder dashboard = dashboard } } ]...) 
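# dashboard_map above flattens a folder -> dashboards map into a single map
# keyed "<folder>-<dashboard>" using merge([ for ... ]...). A standalone
# equivalent of that construct, with hypothetical sample data:
locals {
  example_folders = {
    Kong             = ["kong-official"]
    Disk_Utilization = ["cortex-disk-utilization", "prometheus-disk-utilization"]
  }

  example_dashboard_map = merge([
    for folder, dashboards in local.example_folders : {
      for dashboard in dashboards : "${folder}-${dashboard}" => {
        folder    = folder
        dashboard = dashboard
      }
    }
  ]...)
  # => { "Kong-kong-official" = { folder = "Kong", dashboard = "kong-official" }, ... }
}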
- + role_map = { app_admins = "Admin" app_editors = "Editor" @@ -39,11 +39,14 @@ locals { ] ]) + users_with_roles_map = { + for user in local.users_with_roles : user.email => user + } } resource "null_resource" "wait_for_grafana" { provisioner "local-exec" { - command = <<-EOT + command = <<-EOT #!/bin/bash DOMAIN_NAME="${local.domain_name}" @@ -100,23 +103,23 @@ resource "null_resource" "wait_for_grafana" { } resource "random_password" "admin_passwords" { - for_each = coalesce(toset(var.user_access.app_admins), toset([])) - length = 12 - special = true + for_each = coalesce(toset(var.user_access.app_admins), toset([])) + length = 12 + special = true override_special = "$" } resource "random_password" "editor_passwords" { - for_each = coalesce(toset(var.user_access.app_editors), toset([])) - length = 12 - special = true + for_each = coalesce(toset(var.user_access.app_editors), toset([])) + length = 12 + special = true override_special = "$" } resource "random_password" "viewer_passwords" { - for_each = coalesce(toset(var.user_access.app_viewers), toset([])) - length = 12 - special = true + for_each = coalesce(toset(var.user_access.app_viewers), toset([])) + length = 12 + special = true override_special = "$" } @@ -128,7 +131,7 @@ resource "grafana_user" "admins" { password = random_password.admin_passwords[each.key].result is_admin = true - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_user" "editors" { @@ -139,7 +142,7 @@ resource "grafana_user" "editors" { password = random_password.editor_passwords[each.key].result is_admin = false - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_user" "viewers" { @@ -150,7 +153,7 @@ resource "grafana_user" "viewers" { password = random_password.viewer_passwords[each.key].result is_admin = false - depends_on = [null_resource.wait_for_grafana] + depends_on = [ null_resource.wait_for_grafana ] } resource "grafana_folder" "dashboard_folder" { @@ -170,7 +173,7 @@ resource "grafana_api_key" "admin_token" { name = "terraform-admin-token" role = "Admin" - depends_on = [grafana_user.admins, grafana_user.editors, grafana_user.viewers] + depends_on = [ grafana_user.admins, grafana_user.editors, grafana_user.viewers ] } resource "null_resource" "update_user_roles" { @@ -178,8 +181,8 @@ resource "null_resource" "update_user_roles" { for user in local.users_with_roles : "${user.email}-${user.role}" => user } - provisioner "local-exec" { - command = < v } + for_each = {for k,v in local.grafana_datasource_list : k => v} metadata { - name = "grafana-${each.key}-datasource" + name = "grafana-${each.key}-datasource" namespace = helm_release.grafana[0].namespace labels = { grafana_datasource = "1" @@ -82,10 +83,10 @@ resource "kubernetes_config_map" "grafana_custom_datasource" { data = { "datasource.yaml" = templatefile("${path.module}/templates/grafana-custom-datasource.yaml", { - tempo_datasource = local.enable_tempo - loki_datasource = local.enable_loki - mimir_datasource = local.enable_mimir - datasource_name = each.key + tempo_datasource = local.enable_tempo + loki_datasource = local.enable_loki + mimir_datasource = local.enable_mimir + datasource_name = each.key datasource_header_value = each.value } ) @@ -100,7 +101,7 @@ resource "kubernetes_config_map" "grafana_standard_datasource" { metadata { name = "grafana-standard-datasource" namespace = helm_release.grafana[0].namespace - labels = { + labels = { grafana_datasource = "1" } 
} @@ -108,14 +109,14 @@ resource "kubernetes_config_map" "grafana_standard_datasource" { data = { "datasource.yaml" = templatefile("./templates/grafana-standard-datasource.yaml", { - datasource_name = local.cluster_name + datasource_name = local.cluster_name datasource_header_value = random_uuid.grafana_standard_datasource_header_value.result - mimir_create = local.enable_mimir - loki_create = local.enable_loki - tempo_create = local.enable_tempo - cortex_create = local.enable_cortex - prometheus_create = local.prometheus_enable - }) + mimir_create = local.enable_mimir + loki_create = local.enable_loki + tempo_create = local.enable_tempo + cortex_create = local.enable_cortex + prometheus_create = local.prometheus_enable + }) } } @@ -124,31 +125,31 @@ resource "kubernetes_config_map" "grafana_service_dashboard" { metadata { name = "grafana-service-dashboard" namespace = helm_release.grafana[0].namespace - labels = { + labels = { grafana_dashboard = "1" } } data = { - "kong.json" = file("./templates/kong-official.json") - "cronjob.json" = file("./templates/cronjob.json") - "partner-standard-api.json" = file("./templates/partner-standard-api.json") - "cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") - "prometheus-disk-utilization.json" = file("./templates/prometheus-disk-utilization.json") + "kong.json" = file("./templates/kong-official.json") + "cronjob.json" = file("./templates/cronjob.json") + "partner-standard-api.json" = file("./templates/partner-standard-api.json") + "cortex-disk-utilization.json" = file("./templates/cortex-disk-utilization.json") + "prometheus-disk-utilization.json" = file("./templates/prometheus-disk-utilization.json") } } resource "oci_vault_secret" "observability_admin" { count = local.grafana_enable ? 1 : 0 - + compartment_id = var.provider_id secret_name = "${local.cluster_name}-grafana-admin-password" vault_id = oci_kms_vault.oci_vault.id key_id = oci_kms_key.oci_key.id - + secret_content { content_type = "BASE64" - content = base64encode(random_password.observability_admin[0].result) + content = base64encode(random_password.observability_admin.0.result) } } diff --git a/k8s/oci/oke/main.tf b/k8s/oci/oke/main.tf index a83e81e9..745d6937 100644 --- a/k8s/oci/oke/main.tf +++ b/k8s/oci/oke/main.tf @@ -1,11 +1,16 @@ locals { cluster_name = var.app_env == "" ? 
var.app_name : "${var.app_name}-${var.app_env}" - common_tags = merge(var.common_tags, + common_tags = merge(var.common_tags, tomap({ Project = local.cluster_name, provisioner = "zop-dev", - })) + })) +} + +data "oci_containerengine_clusters" "cluster" { + compartment_id = var.provider_id + name = local.cluster_name } # OKE Module @@ -16,35 +21,35 @@ module "oke" { oci.home = oci } - compartment_id = var.provider_id - cluster_name = local.cluster_name - kubernetes_version = "v1.33.1" + compartment_id = var.provider_id + cluster_name = local.cluster_name + kubernetes_version = "v1.33.1" - create_vcn = false - vcn_id = data.oci_core_vcn.vcn.id - region = var.app_region - cluster_type = "enhanced" + create_vcn = false + vcn_id = data.oci_core_vcn.vcn.id + region = var.app_region + cluster_type = "enhanced" - create_bastion = false - assign_public_ip_to_control_plane = true - control_plane_is_public = true - output_detail = true + create_bastion = false + assign_public_ip_to_control_plane = true + control_plane_is_public = true + output_detail = true - worker_compartment_id = var.provider_id - worker_pool_mode = "node-pool" - worker_image_type = "oke" + worker_compartment_id = var.provider_id + worker_pool_mode = "node-pool" + worker_image_type = "oke" subnets = { - cp = { id = local.cp_subnet_ids } - workers = { id = local.worker_subnet_ids } - pub_lb = { id = local.publb_subnet_ids } + cp = { id = local.cp_subnet_ids} + workers = { id = local.worker_subnet_ids } + pub_lb = { id = local.publb_subnet_ids } } worker_pools = { np1 = { shape = var.node_config.node_type, - ocpus = var.node_config.ocpus, - memory = var.node_config.memory, + ocpus = var.node_config.ocpus, + memory = var.node_config.memory, size = var.node_config.size, boot_volume_size = var.node_config.boot_volume_size, kubernetes_version = "v1.33.1" @@ -52,10 +57,10 @@ module "oke" { } cluster_freeform_tags = merge( - { for k, v in local.common_tags : k => tostring(v) }, + { for k, v in local.common_tags : k => tostring(v) }, { "Name" = local.cluster_name } ) - freeform_tags = { - cluster = { provisioner = "zop-dev" } - } + freeform_tags = { + cluster = { provisioner = "zop-dev" } + } } diff --git a/k8s/oci/oke/prometheus.tf b/k8s/oci/oke/prometheus.tf index 6d746626..47676011 100644 --- a/k8s/oci/oke/prometheus.tf +++ b/k8s/oci/oke/prometheus.tf @@ -3,7 +3,7 @@ resource "kubernetes_namespace" "monitoring" { name = "monitoring" } - depends_on = [module.oke] + depends_on = [ module.oke ] } resource "kubernetes_secret" "prometheus_remote_write_auth" { @@ -21,21 +21,21 @@ resource "kubernetes_secret" "prometheus_remote_write_auth" { type = "Opaque" } -locals { +locals{ ### this app namespace level alerts: - namespace_teams_webhook = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "teams" } if s.alert_webhooks != null]...) - namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = v.data, labels = v.labels == null ? merge(v.labels, { severity = "critical", servicealert = "true", namespace = n }) : merge(v.labels, { namespace = n }), } if v.type == "google_chat" } if s.alert_webhooks != null]...) 
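# The namespace_*_webhook locals in this hunk select per-namespace alert
# webhooks of a given type and attach default labels. A simplified standalone
# sketch of the same shape (the input map, URL, and label values are
# hypothetical, and the label handling is an approximation of the logic above):
locals {
  example_namespaces = {
    orders = {
      alert_webhooks = {
        ops = { type = "teams", data = "https://teams.example.invalid/hook", labels = null }
      }
    }
  }

  example_teams_webhooks = merge([
    for ns, cfg in local.example_namespaces : {
      for name, hook in cfg.alert_webhooks :
      "namespace-webhook-${ns}-${name}" => {
        data   = substr(hook.data, 8, length(hook.data)) # drop the "https://" prefix
        labels = hook.labels == null ? { severity = "critical", servicealert = "true", namespace = ns } : merge(hook.labels, { namespace = ns })
      } if hook.type == "teams"
    } if cfg.alert_webhooks != null
  ]...)
}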
+ namespace_teams_webhook = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = substr(v.data, 8, length(v.data)), labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "teams"}if s.alert_webhooks != null]...) + namespace_google_chat_alerts = merge([for n, s in var.app_namespaces : { for k, v in s.alert_webhooks : "namespace-webhook-${n}-${k}" => { data = v.data, labels = v.labels == null ? merge(v.labels, {severity = "critical", servicealert = "true",namespace = n}) : merge(v.labels, {namespace = n}), } if v.type == "google_chat"}if s.alert_webhooks != null]...) ### this is cluster level alerts: - cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data, 8, length(val.data)), labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "teams" } - cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "moogsoft" } - cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "pagerduty" } - cluster_google_chat_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => { data = val.data, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } if val.type == "google_chat" } - cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) - cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) - cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => { url = val.url, channel = val.channel, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } - cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => { url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? { severity = "critical", servicealert = "true" } : val.labels, } } - google_chat_alerts = merge(local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) + cluster_teams_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "cluster-webhook-${key}" => { data = substr(val.data,8 ,length(val.data) ),labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "teams"} + cluster_moogsoft_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "moogsoft-webhook-${key}" => { data = val.data,labels = val.labels == null ? 
{severity = "critical", servicealert = "true"} : val.labels, } if val.type == "moogsoft"} + cluster_pagerduty_alerts = jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "pagerduty-webhook-${key}" => { data = val.data,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "pagerduty"} + cluster_google_chat_alerts= jsonencode(var.cluster_alert_webhooks) == "" ? {} : { for key, val in var.cluster_alert_webhooks : "google-chat-webhook-${key}" => {data = val.data, labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, } if val.type == "google_chat"} + cluster_alerts = merge(local.namespace_teams_webhook, local.cluster_teams_alerts) + cluster_alerts_webhook = merge(local.cluster_alerts, local.cluster_moogsoft_alerts, local.cluster_pagerduty_alerts) + cluster_slack_alerts = jsonencode(var.slack_alerts_configs) == "" ? {} : { for key, val in var.slack_alerts_configs : "slack-alert-${val.name}" => {url = val.url, channel = val.channel,labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, }} + cluster_webhook_alerts = jsonencode(var.webhook_alerts_configs) == "" ? {} : { for key, val in var.webhook_alerts_configs : "webhook-alert-${val.name}" => {url = val.url, send_resolved = val.send_resolved, labels = val.labels == null ? {severity = "critical", servicealert = "true"} : val.labels, }} + google_chat_alerts = merge( local.cluster_google_chat_alerts, local.namespace_google_chat_alerts) # Create secrets for user-provided remote write configs with basic auth prometheus_remote_write_secrets = try(var.observability_config.prometheus.remote_write, null) != null ? { @@ -55,9 +55,9 @@ locals { ] : [] default_remote_write_config = local.enable_mimir && local.prometheus_enable ? [{ - host = "http://mimir-distributor.mimir:8080/api/v1/push" - key = "X-Scope-OrgID" - value = random_uuid.grafana_standard_datasource_header_value.result + host = "http://mimir-distributor.mimir:8080/api/v1/push" + key = "X-Scope-OrgID" + value = random_uuid.grafana_standard_datasource_header_value.result secret_name = null }] : [] @@ -68,31 +68,31 @@ data "template_file" "prom_template" { count = local.prometheus_enable ? 1 : 0 template = file("./templates/prometheus-values.yaml") - vars = { - PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") - PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? var.observability_config.prometheus.persistence.retention_size : "20GB", "20GB") - PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "7d", "7d") - CLUSTER_NAME = local.cluster_name - REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) - ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false - MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true - MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? 
false : true - MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) - MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key - MOOGSOFT_USERNAME = var.moogsoft_username - teams_webhook_alerts = jsonencode(local.cluster_alerts) - cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) - cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) - GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true - SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true - WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true - GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) - SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) - WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) - PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true - PAGER_DUTY_KEY = var.pagerduty_integration_key - PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) - GRAFANA_HOST = local.grafana_enable ? local.grafana_host : "" + vars = { + PROMETHEUS_DISK_SIZE = try(var.observability_config.prometheus.persistence.disk_size != null ? var.observability_config.prometheus.persistence.disk_size : "50Gi", "50Gi") + PROMETHEUS_RETENTION_SIZE = try(var.observability_config.prometheus.persistence.retention_size != null ? var.observability_config.prometheus.persistence.retention_size : "20GB", "20GB") + PROMETHEUS_RETENTION_DURATION = try(var.observability_config.prometheus.persistence.retention_duration != null ? var.observability_config.prometheus.persistence.retention_duration : "7d", "7d") + CLUSTER_NAME = local.cluster_name + REMOTE_WRITE_CONFIGS = jsonencode(local.remote_write_config) + ALERTS_ENABLED = jsonencode(local.cluster_moogsoft_alerts) != "" || jsonencode(local.namespace_teams_webhook) != "" || jsonencode(local.cluster_teams_alerts) != "" || jsonencode(local.google_chat_alerts) != "" || jsonencode(local.cluster_slack_alerts) != "" || jsonencode(local.cluster_webhook_alerts) != "" ? true : false + MOOGSOFT_ALERTS_ENABLED = local.cluster_moogsoft_alerts == {} ? false : true + MS_TEAMS_ALERT_ENABLED = jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? false : true + MOOGSOFT_ENDPOINT_URL = jsonencode(local.cluster_moogsoft_alerts) + MOOGSOFT_ENDPOINT_API_KEY = var.moogsoft_endpoint_api_key + MOOGSOFT_USERNAME = var.moogsoft_username + teams_webhook_alerts = jsonencode(local.cluster_alerts) + cluster_moogsoft_alerts = jsonencode(local.cluster_moogsoft_alerts) + cluster_teams_alerts = jsonencode(local.cluster_alerts_webhook) + GOOGLE_CHAT_ALERTS_ENABLED = local.google_chat_alerts == "" ? false : true + SLACK_CHAT_ALERTS_ENABLED = local.cluster_slack_alerts == "" ? false : true + WEBHOOK_ALERTS_ENABLED = local.cluster_webhook_alerts == "" ? false : true + GOOGLE_CHAT_CONFIGS = jsonencode(local.google_chat_alerts) + SLACK_CONFIGS = jsonencode(local.cluster_slack_alerts) + WEBHOOK_CONFIGS = jsonencode(local.cluster_webhook_alerts) + PAGER_DUTY_ALERTS_ENABLED = local.cluster_pagerduty_alerts == "" ? false : true + PAGER_DUTY_KEY = var.pagerduty_integration_key + PAGER_DUTY_ENDPOINT_URL = jsonencode(local.cluster_pagerduty_alerts) + GRAFANA_HOST = local.grafana_enable ? 
local.grafana_host : "" } } @@ -103,7 +103,7 @@ resource "helm_release" "prometheus" { chart = "kube-prometheus-stack" name = "prometheus" - namespace = kubernetes_namespace.monitoring.metadata[0].name + namespace = kubernetes_namespace.monitoring.metadata.0.name create_namespace = true version = try(var.observability_config.prometheus.version != null ? var.observability_config.prometheus.version : "60.0.0", "60.0.0") timeout = 1200 @@ -116,7 +116,7 @@ resource "helm_release" "prometheus" { } resource "helm_release" "alerts_teams" { - count = local.prometheus_enable && local.grafana_enable ? (jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? 0 : 1) : 0 + count = local.prometheus_enable && local.grafana_enable ? (jsonencode(local.namespace_teams_webhook) == "" && jsonencode(local.cluster_teams_alerts) == "" ? 0 : 1 ) : 0 repository = "https://prometheus-msteams.github.io/prometheus-msteams" chart = "prometheus-msteams" @@ -131,20 +131,20 @@ resource "helm_release" "alerts_teams" { data "template_file" "cluster-alerts" { template = file("./templates/cluster-level-alerts.yaml") - vars = { + vars = { cluster_memory_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_underutilisation != null ? var.cluster_alert_thresholds.memory_underutilisation : 20) - cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) - cluster_node_count_max_value = var.node_config.size - cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) - cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count : 80) - cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? var.cluster_alert_thresholds.cpu_utilisation : 80) - cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation : 20) - cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization : 80) - cluster_name = local.cluster_name - cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) - nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold : 5) - cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) - prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) + cluster_cpu_usage_request_underutilisation_threshold = var.cluster_alert_thresholds == null ? 
20 : (var.cluster_alert_thresholds.cpu_underutilisation != null ? var.cluster_alert_thresholds.cpu_underutilisation : 20) + cluster_node_count_max_value = var.node_config.size + cluster_node_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.node_count != null ? var.cluster_alert_thresholds.node_count : 80) + cluster_pod_count_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.pod_count != null ? var.cluster_alert_thresholds.pod_count: 80) + cluster_total_cpu_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cpu_utilisation != null ? var.cluster_alert_thresholds.cpu_utilisation: 80) + cluster_total_memory_utilization_threshold = var.cluster_alert_thresholds == null ? 20 : (var.cluster_alert_thresholds.memory_utilisation != null ? var.cluster_alert_thresholds.memory_utilisation: 20) + cluster_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.disk_utilization != null ? var.cluster_alert_thresholds.disk_utilization: 80) + cluster_name = local.cluster_name + cortex_enabled = try(var.observability_config.cortex == null ? false : var.observability_config.cortex.enable, false) + nginx_5xx_percentage_threshold = var.cluster_alert_thresholds == null ? 5 : (var.cluster_alert_thresholds.nginx_5xx_percentage_threshold != null ? var.cluster_alert_thresholds.nginx_5xx_percentage_threshold: 5) + cortex_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.cortex_disk_utilization_threshold != null ? var.cluster_alert_thresholds.cortex_disk_utilization_threshold : 80) + prometheus_disk_utilization_threshold = var.cluster_alert_thresholds == null ? 80 : (var.cluster_alert_thresholds.prometheus_disk_utilization_threshold != null ? var.cluster_alert_thresholds.prometheus_disk_utilization_threshold : 80) } } diff --git a/kafka/aws-msk/main.tf b/kafka/aws-msk/main.tf index 8b44e810..7a127101 100644 --- a/kafka/aws-msk/main.tf +++ b/kafka/aws-msk/main.tf @@ -1,5 +1,5 @@ data "aws_subnet" "app_subnet" { - id = var.kafka_subnets[0] + id = var.kafka_subnets.0 } locals { diff --git a/kafka/aws-msk/output.tf b/kafka/aws-msk/output.tf index 2e7c07e2..a79494ba 100644 --- a/kafka/aws-msk/output.tf +++ b/kafka/aws-msk/output.tf @@ -1,5 +1,5 @@ output "zookeeper_connect_string" { - value = aws_msk_cluster.msk_cluster[*].zookeeper_connect_string + value = aws_msk_cluster.msk_cluster.*.zookeeper_connect_string } output "bootstrap_brokers_tls" { @@ -8,9 +8,9 @@ output "bootstrap_brokers_tls" { } output "bootstrap_brokers_sasl_scram" { - value = aws_msk_cluster.msk_cluster[*].bootstrap_brokers_sasl_scram + value = aws_msk_cluster.msk_cluster.*.bootstrap_brokers_sasl_scram } output "bootstrap_brokers" { - value = aws_msk_cluster.msk_cluster[*].bootstrap_brokers + value = aws_msk_cluster.msk_cluster.*.bootstrap_brokers } diff --git a/kafka/aws-msk/vars.tf b/kafka/aws-msk/vars.tf index ad89ab43..db834273 100644 --- a/kafka/aws-msk/vars.tf +++ b/kafka/aws-msk/vars.tf @@ -31,9 +31,24 @@ variable "kafka_admin_user" { type = string } +variable "app_region" { + description = "Cloud region to deploy to (e.g. us-east-1)" + type = string +} + variable "common_tags" { description = "additional tags for merging with common tags" type = map(string) default = {} } +variable "kafka_topics" { + description = "Kafka Topics to be created." 
+ type = list(object({ + name = string + replication_factor = number + partitions = number + })) + default = [] +} + diff --git a/kops-kube/aws/kubernetes.tf b/kops-kube/aws/kubernetes.tf index 9344be6e..ec7a8aeb 100644 --- a/kops-kube/aws/kubernetes.tf +++ b/kops-kube/aws/kubernetes.tf @@ -1,17 +1,17 @@ module "remote_state_gcp_cluster" { - source = "../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = var.shared_services.cluster_prefix + source = "../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = var.shared_services.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../remote-state/aws" - count = var.shared_services.type == "aws" ? 1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = var.shared_services.cluster_prefix - location = var.shared_services.location + source = "../../remote-state/aws" + count = var.shared_services.type == "aws" ? 1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = var.shared_services.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { @@ -33,7 +33,7 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -41,7 +41,7 @@ provider "kubernetes" { provider "kubectl" { load_config_file = false host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -49,7 +49,7 @@ provider "kubectl" { provider "helm" { kubernetes { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } } \ No newline at end of file diff --git a/kops-kube/azure/kubernetes.tf b/kops-kube/azure/kubernetes.tf index 221ba445..6d2c9878 100644 --- a/kops-kube/azure/kubernetes.tf +++ b/kops-kube/azure/kubernetes.tf @@ -1,17 +1,17 @@ module "remote_state_gcp_cluster" { - source = "../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = var.shared_services.cluster_prefix + source = "../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = var.shared_services.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../remote-state/aws" - count = var.shared_services.type == "aws" ? 1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = var.shared_services.cluster_prefix - location = var.shared_services.location + source = "../../remote-state/aws" + count = var.shared_services.type == "aws" ? 
1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = var.shared_services.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { @@ -29,26 +29,26 @@ data "azurerm_kubernetes_cluster" "cluster" { } provider "kubectl" { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) token = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].cluster_host : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].cluster_host : module.remote_state_azure_cluster[0].cluster_host) load_config_file = false } provider "kubernetes" { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) } provider "helm" { kubernetes { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) } } \ No newline at end of file diff --git a/kops-kube/gcp/kubernetes.tf b/kops-kube/gcp/kubernetes.tf index 9efda65a..7cd08d79 100644 --- a/kops-kube/gcp/kubernetes.tf +++ b/kops-kube/gcp/kubernetes.tf @@ -1,8 +1,8 @@ data "terraform_remote_state" "infra_output" { - backend = "gcs" + backend = "gcs" config = { - bucket = var.bucket_name - prefix = "${var.cluster_prefix}/terraform.tfstate" + bucket = var.bucket_name + 
prefix = "${var.cluster_prefix}/terraform.tfstate" } } @@ -11,6 +11,8 @@ data "google_container_cluster" "gke" { location = var.app_region } +data "google_project" "this" {} + data "google_client_config" "default" {} # Kubernetes provider @@ -23,8 +25,8 @@ data "google_client_config" "default" {} provider "kubernetes" { host = "https://${data.terraform_remote_state.infra_output.outputs.kubernetes_endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(data.terraform_remote_state.infra_output.outputs.ca_certificate) } @@ -32,8 +34,8 @@ provider "kubectl" { load_config_file = false host = "https://${data.terraform_remote_state.infra_output.outputs.kubernetes_endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(data.terraform_remote_state.infra_output.outputs.ca_certificate) } @@ -41,8 +43,8 @@ provider "helm" { kubernetes { host = "https://${data.terraform_remote_state.infra_output.outputs.kubernetes_endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(data.terraform_remote_state.infra_output.outputs.ca_certificate) } } diff --git a/object-storage/aws/vars.tf b/object-storage/aws/vars.tf index 669d7ad8..d89f1296 100644 --- a/object-storage/aws/vars.tf +++ b/object-storage/aws/vars.tf @@ -10,3 +10,8 @@ variable "enable_versioning" { default = false } +variable "force_destroy" { + description = "Whether to force destroy by cleaning up the bucket" + type = bool + default = true +} \ No newline at end of file diff --git a/observability/aws/issuer.tf b/observability/aws/issuer.tf index 2eb60210..d01925dd 100644 --- a/observability/aws/issuer.tf +++ b/observability/aws/issuer.tf @@ -1,8 +1,8 @@ resource "kubernetes_secret_v1" "namespace-cert-replicator" { for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null]) metadata { - name = "tls-secret-replica" - namespace = kubernetes_namespace.app_environments[each.key].metadata[0].name + name = "tls-secret-replica" + namespace = kubernetes_namespace.app_environments[each.key].metadata.0.name annotations = { "replicator.v1.mittwald.de/replicate-from" = "cert-manager/wildcard-dns" } diff --git a/observability/aws/main.tf b/observability/aws/main.tf index 93298a83..a6c30a83 100644 --- a/observability/aws/main.tf +++ b/observability/aws/main.tf @@ -1,17 +1,19 @@ locals { - access_secret = urlencode(var.access_secret) - access_key = urlencode(var.access_key) - cluster_name = 
var.cluster_name + access_secret = urlencode(var.access_secret) + access_key = urlencode(var.access_key) + cluster_name = var.cluster_name + cluster_name_parts = split("-", local.cluster_name) + environment = var.app_env == "" ? element(local.cluster_name_parts, length(local.cluster_name_parts) - 1) : var.app_env enable_loki = try(var.loki != null ? var.loki.enable : false, false) enable_tempo = try(var.tempo != null ? var.tempo.enable : false, false) enable_cortex = try(var.cortex != null ? var.cortex.enable : false, false) - enable_mimir = try(var.mimir != null ? var.mimir.enable : false, false) + enable_mimir = try(var.mimir != null ? var.mimir.enable : false,false) - enable_ingress_loki = local.enable_loki ? (var.loki.enable_ingress != null ? var.loki.enable_ingress : false) : false - enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false) : false - enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false) : false - enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false) : false + enable_ingress_loki = local.enable_loki ? (var.loki.enable_ingress != null ? var.loki.enable_ingress : false ) : false + enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false ) : false + enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false ) : false + enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false ) : false app_namespaces = { loki = local.enable_loki ? { @@ -22,20 +24,24 @@ locals { services = ["tempo-distributor:9411"] ingress = local.enable_ingress_tempo } : null - cortex = local.enable_cortex ? { + cortex = local.enable_cortex ? { services = ["cortex-distributor:8080"] ingress = local.enable_ingress_cortex } : null - mimir = local.enable_mimir ? { - services = ["mimir-distributor:8080"] + mimir = local.enable_mimir ? { + services = ["mimir-distributor:8080"] ingress = local.enable_ingress_mimir } : null } + filtered_namespace = { + for key, value in local.app_namespaces : + key => value if value != null + } } resource "kubernetes_namespace" "app_environments" { - for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null]) + for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null] ) metadata { name = each.key @@ -55,26 +61,26 @@ locals { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen - domain_name = "${split(":", service)[0]}.${var.domain_name}" - ns = ns - ingress_name = "${split(":", service)[0]}-ingress" + domain_name = "${split(":", service)[0]}.${var.domain_name}" + ns = ns + ingress_name = "${split(":", service)[0]}-ingress" } if local.app_namespaces[ns].ingress == true } if local.app_namespaces[ns] != null ]...) 
} resource "kubernetes_ingress_v1" "service_ingress" { - for_each = { for service, value in local.services_list : service => value } + for_each = {for service, value in local.services_list : service => value} metadata { name = each.value.ingress_name namespace = each.value.ns annotations = merge( { - "kubernetes.io/ingress.class" = "nginx" + "kubernetes.io/ingress.class" = "nginx" }, each.value.ns == "mimir" && local.enable_mimir ? { "nginx.ingress.kubernetes.io/auth-type" = "basic" - "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" + "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" "nginx.ingress.kubernetes.io/auth-realm" = "Authentication Required" } : {} ) @@ -97,7 +103,7 @@ resource "kubernetes_ingress_v1" "service_ingress" { } } tls { - secret_name = "tls-secret-replica" + secret_name ="tls-secret-replica" hosts = ["*.${var.domain_name}"] } } diff --git a/observability/aws/vars.tf b/observability/aws/vars.tf index 39a9cfb7..a1769d7b 100644 --- a/observability/aws/vars.tf +++ b/observability/aws/vars.tf @@ -3,6 +3,12 @@ variable "app_name" { type = string } +variable "app_env" { + description = "Application deployment environment." + type = string + default = "" +} + variable "app_region" { description = "Cloud region to deploy to (e.g. us-east1)" type = string @@ -38,20 +44,20 @@ variable "cluster_name" { variable "loki" { description = "Loki configuration for observability setup" type = object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -107,29 +113,29 @@ variable "loki" { variable "cortex" { description = "Cortex configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_series_per_metric = optional(number) - max_series_per_user = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_series_per_metric = optional(number) + max_series_per_user = optional(number) max_fetched_chunks_per_query = optional(number) })) query_range = optional(object({ memcached_client_timeout = optional(string) })) compactor = 
optional(object({ - enable = optional(bool) - replicas = optional(number) + enable = optional(bool) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -139,8 +145,8 @@ variable "cortex" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) @@ -167,7 +173,7 @@ variable "cortex" { })) query_frontend = optional(object({ replicas = optional(number) - enable = optional(bool) + enable = optional(bool) })) store_gateway = optional(object({ replication_factor = optional(number) @@ -226,23 +232,23 @@ variable "cortex" { variable "tempo" { description = "tempo configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - ingester_bytes_received = optional(number) - distributor_ingester_appends = optional(number) + ingester_bytes_received = optional(number) + distributor_ingester_appends = optional(number) distributor_ingester_append_failures = optional(number) - ingester_live_traces = optional(number) - distributor_spans_received = optional(number) - distributor_bytes_received = optional(number) - ingester_blocks_flushed = optional(number) - tempodb_blocklist = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + ingester_live_traces = optional(number) + distributor_spans_received = optional(number) + distributor_bytes_received = optional(number) + ingester_blocks_flushed = optional(number) + tempodb_blocklist = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -269,10 +275,10 @@ variable "tempo" { cpu_utilization = optional(string) })) querier = optional(object({ - replicas = optional(number) + replicas = optional(number) })) query_frontend = optional(object({ - replicas = optional(number) + replicas = optional(number) })) metrics_generator = optional(object({ enable = optional(bool) @@ -280,9 +286,9 @@ variable "tempo" { service_graphs_max_items = optional(number) service_graphs_wait = optional(string) remote_write_flush_deadline = optional(string) - remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -294,31 +300,31 @@ variable "tempo" { variable "mimir" { description = "mimir configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - 
querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_fetched_chunks_per_query = optional(number) - max_cache_freshness = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_fetched_chunks_per_query = optional(number) + max_cache_freshness = optional(number) max_outstanding_requests_per_tenant = optional(number) })) compactor = optional(object({ - replicas = optional(number) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -328,22 +334,22 @@ variable "mimir" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) query_frontend = optional(object({ replicas = optional(number) @@ -360,11 +366,11 @@ variable "mimir" { max_memory = optional(string) })) distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) })) }) } diff --git a/observability/azure/issuer.tf b/observability/azure/issuer.tf index 2eb60210..d01925dd 100644 --- a/observability/azure/issuer.tf +++ b/observability/azure/issuer.tf @@ -1,8 +1,8 @@ resource "kubernetes_secret_v1" "namespace-cert-replicator" { for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null]) metadata { - name = "tls-secret-replica" - namespace = kubernetes_namespace.app_environments[each.key].metadata[0].name + name = "tls-secret-replica" + namespace = kubernetes_namespace.app_environments[each.key].metadata.0.name annotations = { "replicator.v1.mittwald.de/replicate-from" = "cert-manager/wildcard-dns" } diff --git a/observability/azure/main.tf b/observability/azure/main.tf index 449d6d1b..53888bfd 100644 --- a/observability/azure/main.tf +++ b/observability/azure/main.tf @@ -1,32 +1,34 @@ locals { cluster_name = var.app_env == "" ? var.app_name : "${var.app_name}-${var.app_env}" + cluster_name_parts = split("-", local.cluster_name) + environment = var.app_env == "" ? 
element(local.cluster_name_parts, length(local.cluster_name_parts) - 1) : var.app_env enable_loki = try(var.loki != null ? var.loki.enable : false, false) enable_tempo = try(var.tempo != null ? var.tempo.enable : false, false) enable_cortex = try(var.cortex != null ? var.cortex.enable : false, false) - enable_mimir = try(var.mimir != null ? var.mimir.enable : false, false) + enable_mimir = try(var.mimir != null ? var.mimir.enable : false,false) - enable_ingress_loki = local.enable_loki ? (var.loki.enable_ingress != null ? var.loki.enable_ingress : false) : false - enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false) : false - enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false) : false - enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false) : false + enable_ingress_loki = local.enable_loki ? (var.loki.enable_ingress != null ? var.loki.enable_ingress : false ) : false + enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false ) : false + enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false ) : false + enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false ) : false app_namespaces = { - loki = local.enable_loki ? { + loki = local.enable_loki ? { services = ["loki-distributor:3100", "loki-querier:3100"] ingress = local.enable_ingress_loki } : null - tempo = local.enable_tempo ? { + tempo = local.enable_tempo ? { services = ["tempo-distributor:9411"] ingress = local.enable_ingress_tempo } : null - cortex = local.enable_cortex ? { + cortex = local.enable_cortex ? { services = ["cortex-distributor:8080"] ingress = local.enable_ingress_cortex } : null - mimir = local.enable_mimir ? { - services = ["mimir-distributor:8080"] - ingress = local.enable_ingress_mimir + mimir = local.enable_mimir ? { + services = ["mimir-distributor:8080"] + ingress = local.enable_ingress_mimir } : null } @@ -53,16 +55,16 @@ locals { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen - domain_name = "${split(":", service)[0]}.${var.domain_name}" - ns = ns - ingress_name = "${split(":", service)[0]}-ingress" + domain_name = "${split(":", service)[0]}.${var.domain_name}" + ns = ns + ingress_name = "${split(":", service)[0]}-ingress" } if local.app_namespaces[ns].ingress == true } if local.app_namespaces[ns] != null ]...) } resource "kubernetes_ingress_v1" "service_ingress" { - for_each = { for service, value in local.services_list : service => value if var.domain_name != "" } + for_each = {for service, value in local.services_list : service => value if var.domain_name != ""} metadata { name = each.value.ingress_name namespace = each.value.ns @@ -72,7 +74,7 @@ resource "kubernetes_ingress_v1" "service_ingress" { }, each.value.ns == "mimir" && local.enable_mimir ? 
{ "nginx.ingress.kubernetes.io/auth-type" = "basic" - "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" + "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" "nginx.ingress.kubernetes.io/auth-realm" = "Authentication Required" } : {} ) @@ -94,7 +96,7 @@ resource "kubernetes_ingress_v1" "service_ingress" { } } tls { - secret_name = "tls-secret-replica" + secret_name ="tls-secret-replica" hosts = ["*.${var.domain_name}"] } } diff --git a/observability/azure/vars.tf b/observability/azure/vars.tf index ad09786b..c3a601dd 100644 --- a/observability/azure/vars.tf +++ b/observability/azure/vars.tf @@ -26,6 +26,17 @@ variable "domain_name" { default = "" } +variable "cluster_name" { + description = "cluster name" + type = string +} + +variable "resource_group_name" { + description = "The Azure Resource Group name in which all resources should be created." + type = string + default = "" +} + variable "storage_account" { description = "The Azure Storage Account name in which data should be stored." type = string @@ -41,20 +52,20 @@ variable "account_access_key" { variable "loki" { description = "Loki configuration for observability setup" type = object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -110,29 +121,29 @@ variable "loki" { variable "cortex" { description = "Cortex configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_series_per_metric = optional(number) - max_series_per_user = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_series_per_metric = optional(number) + max_series_per_user = optional(number) max_fetched_chunks_per_query = optional(number) })) query_range = optional(object({ memcached_client_timeout = optional(string) })) compactor = optional(object({ - enable = optional(bool) - replicas = optional(number) + enable = optional(bool) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -142,8 +153,8 @@ variable "cortex" { min_memory = 
optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) @@ -170,7 +181,7 @@ variable "cortex" { })) query_frontend = optional(object({ replicas = optional(number) - enable = optional(bool) + enable = optional(bool) })) store_gateway = optional(object({ replication_factor = optional(number) @@ -229,23 +240,23 @@ variable "cortex" { variable "tempo" { description = "tempo configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - ingester_bytes_received = optional(number) - distributor_ingester_appends = optional(number) + ingester_bytes_received = optional(number) + distributor_ingester_appends = optional(number) distributor_ingester_append_failures = optional(number) - ingester_live_traces = optional(number) - distributor_spans_received = optional(number) - distributor_bytes_received = optional(number) - ingester_blocks_flushed = optional(number) - tempodb_blocklist = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + ingester_live_traces = optional(number) + distributor_spans_received = optional(number) + distributor_bytes_received = optional(number) + ingester_blocks_flushed = optional(number) + tempodb_blocklist = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -272,10 +283,10 @@ variable "tempo" { cpu_utilization = optional(string) })) querier = optional(object({ - replicas = optional(number) + replicas = optional(number) })) query_frontend = optional(object({ - replicas = optional(number) + replicas = optional(number) })) metrics_generator = optional(object({ enable = optional(bool) @@ -283,9 +294,9 @@ variable "tempo" { service_graphs_max_items = optional(number) service_graphs_wait = optional(string) remote_write_flush_deadline = optional(string) - remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -297,25 +308,25 @@ variable "tempo" { variable "mimir" { description = "mimir configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_fetched_chunks_per_query = optional(number) - max_cache_freshness = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_fetched_chunks_per_query = optional(number) + 
max_cache_freshness = optional(number) max_outstanding_requests_per_tenant = optional(number) })) compactor = optional(object({ - replicas = optional(number) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -325,22 +336,22 @@ variable "mimir" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) query_frontend = optional(object({ replicas = optional(number) @@ -357,11 +368,11 @@ variable "mimir" { max_memory = optional(string) })) distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) })) }) } \ No newline at end of file diff --git a/observability/gcp/issuer.tf b/observability/gcp/issuer.tf index 2eb60210..d01925dd 100644 --- a/observability/gcp/issuer.tf +++ b/observability/gcp/issuer.tf @@ -1,8 +1,8 @@ resource "kubernetes_secret_v1" "namespace-cert-replicator" { for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null]) metadata { - name = "tls-secret-replica" - namespace = kubernetes_namespace.app_environments[each.key].metadata[0].name + name = "tls-secret-replica" + namespace = kubernetes_namespace.app_environments[each.key].metadata.0.name annotations = { "replicator.v1.mittwald.de/replicate-from" = "cert-manager/wildcard-dns" } diff --git a/observability/gcp/main.tf b/observability/gcp/main.tf index 4312e2ea..d1429594 100644 --- a/observability/gcp/main.tf +++ b/observability/gcp/main.tf @@ -1,34 +1,36 @@ locals { cluster_name = var.app_env == "" ? var.app_name : "${var.app_name}-${var.app_env}" + cluster_name_parts = split("-", local.cluster_name) + environment = var.app_env == "" ? element(local.cluster_name_parts, length(local.cluster_name_parts) - 1) : var.app_env - enable_loki = try(var.loki != null ? var.loki.enable : false, false) - enable_tempo = try(var.tempo != null ? var.tempo.enable : false, false) - enable_cortex = try(var.cortex != null ? var.cortex.enable : false, false) - enable_mimir = try(var.mimir != null ? var.mimir.enable : false, false) + enable_loki = try(var.loki != null ? var.loki.enable : false, false) + enable_tempo = try(var.tempo != null ? var.tempo.enable : false, false) + enable_cortex = try(var.cortex != null ? var.cortex.enable : false, false) + enable_mimir = try(var.mimir != null ? var.mimir.enable : false,false) enable_openobserve = length([for instance in var.openobserve : instance if instance.enable]) > 0 - enable_ingress_loki = local.enable_loki ? 
(var.loki.enable_ingress != null ? var.loki.enable_ingress : false) : false - enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false) : false - enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false) : false - enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false) : false + enable_ingress_loki = local.enable_loki ? (var.loki.enable_ingress != null ? var.loki.enable_ingress : false ) : false + enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false ) : false + enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false ) : false + enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false ) : false enable_ingress_openobserve = true app_namespaces = { - loki = local.enable_loki ? { + loki = local.enable_loki ? { services = ["loki-distributor:3100", "loki-querier:3100"] ingress = local.enable_ingress_loki } : null - tempo = local.enable_tempo ? { + tempo = local.enable_tempo ? { services = ["tempo-distributor:9411"] ingress = local.enable_ingress_tempo } : null - cortex = local.enable_cortex ? { + cortex = local.enable_cortex ? { services = ["cortex-distributor:8080"] ingress = local.enable_ingress_cortex } : null - mimir = local.enable_mimir ? { - services = ["mimir-distributor:8080"] - ingress = local.enable_ingress_mimir + mimir = local.enable_mimir ? { + services = ["mimir-distributor:8080"] + ingress = local.enable_ingress_mimir } : null openobserve = local.enable_openobserve ? { services = [for instance in var.openobserve : "${instance.name}-openobserve-standalone:5080"] @@ -55,20 +57,20 @@ resource "kubernetes_namespace" "app_environments" { locals { services_list = merge([ for ns in keys(local.app_namespaces) : { - for service in local.app_namespaces[ns].services : "${service}-${ns}" => { + for service in local.app_namespaces[ns].services : "${service}-${ns}" => { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen - domain_name = "${split(":", service)[0]}.${var.domain_name}" - ns = ns - ingress_name = "${split(":", service)[0]}-ingress" + domain_name = "${split(":", service)[0]}.${var.domain_name}" + ns = ns + ingress_name = "${split(":", service)[0]}-ingress" } if local.app_namespaces[ns].ingress == true } if local.app_namespaces[ns] != null ]...) } resource "kubernetes_ingress_v1" "service_ingress" { - for_each = { for service, value in local.services_list : service => value if var.domain_name != "" } + for_each = {for service, value in local.services_list : service => value if var.domain_name != ""} metadata { name = each.value.ingress_name namespace = each.value.ns @@ -78,7 +80,7 @@ resource "kubernetes_ingress_v1" "service_ingress" { }, each.value.ns == "mimir" && local.enable_mimir ? 
{ "nginx.ingress.kubernetes.io/auth-type" = "basic" - "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" + "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" "nginx.ingress.kubernetes.io/auth-realm" = "Authentication Required" } : {} ) @@ -101,7 +103,7 @@ resource "kubernetes_ingress_v1" "service_ingress" { } } tls { - secret_name = "tls-secret-replica" + secret_name ="tls-secret-replica" hosts = ["*.${var.domain_name}"] } } diff --git a/observability/gcp/vars.tf b/observability/gcp/vars.tf index 26bd4a8d..e58ca808 100644 --- a/observability/gcp/vars.tf +++ b/observability/gcp/vars.tf @@ -26,6 +26,12 @@ variable "domain_name" { default = "" } +variable "hosted_zone" { + description = "Hosted zone name for the records" + type = string + default = "" +} + variable "observability_suffix" { description = "To add a suffix to Storage Buckets in Observability Cluster" type = string @@ -45,20 +51,20 @@ variable "service_account_name_prefix" { variable "loki" { description = "Loki configuration for observability setup" type = object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -114,29 +120,29 @@ variable "loki" { variable "cortex" { description = "Cortex configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_series_per_metric = optional(number) - max_series_per_user = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_series_per_metric = optional(number) + max_series_per_user = optional(number) max_fetched_chunks_per_query = optional(number) })) query_range = optional(object({ memcached_client_timeout = optional(string) })) compactor = optional(object({ - enable = optional(bool) - replicas = optional(number) + enable = optional(bool) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -146,8 +152,8 @@ variable "cortex" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = 
optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) @@ -174,7 +180,7 @@ variable "cortex" { })) query_frontend = optional(object({ replicas = optional(number) - enable = optional(bool) + enable = optional(bool) })) store_gateway = optional(object({ replication_factor = optional(number) @@ -233,23 +239,23 @@ variable "cortex" { variable "tempo" { description = "tempo configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - ingester_bytes_received = optional(number) - distributor_ingester_appends = optional(number) + ingester_bytes_received = optional(number) + distributor_ingester_appends = optional(number) distributor_ingester_append_failures = optional(number) - ingester_live_traces = optional(number) - distributor_spans_received = optional(number) - distributor_bytes_received = optional(number) - ingester_blocks_flushed = optional(number) - tempodb_blocklist = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + ingester_live_traces = optional(number) + distributor_spans_received = optional(number) + distributor_bytes_received = optional(number) + ingester_blocks_flushed = optional(number) + tempodb_blocklist = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) max_receiver_msg_size = optional(number) ingester = optional(object({ @@ -277,10 +283,10 @@ variable "tempo" { cpu_utilization = optional(string) })) querier = optional(object({ - replicas = optional(number) + replicas = optional(number) })) query_frontend = optional(object({ - replicas = optional(number) + replicas = optional(number) })) metrics_generator = optional(object({ enable = optional(bool) @@ -288,9 +294,9 @@ variable "tempo" { service_graphs_max_items = optional(number) service_graphs_wait = optional(string) remote_write_flush_deadline = optional(string) - remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -302,25 +308,25 @@ variable "tempo" { variable "mimir" { description = "mimir configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_fetched_chunks_per_query = optional(number) - max_cache_freshness = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_fetched_chunks_per_query = optional(number) + max_cache_freshness = optional(number) max_outstanding_requests_per_tenant = optional(number) })) compactor = 
optional(object({ - replicas = optional(number) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -330,22 +336,22 @@ variable "mimir" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) query_frontend = optional(object({ replicas = optional(number) @@ -362,30 +368,30 @@ variable "mimir" { max_memory = optional(string) })) distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) })) caches = optional(object({ chunks = optional(object({ - enabled = optional(bool) - replicas = optional(number) - max_item_memory = optional(number) - connection_limit = optional(number) + enabled = optional(bool) + replicas = optional(number) + max_item_memory = optional(number) + connection_limit = optional(number) })) index = optional(object({ - enabled = optional(bool) - replicas = optional(number) - max_item_memory = optional(number) - connection_limit = optional(number) + enabled = optional(bool) + replicas = optional(number) + max_item_memory = optional(number) + connection_limit = optional(number) })) metadata = optional(object({ - enabled = optional(bool) - replicas = optional(number) - max_item_memory = optional(number) - connection_limit = optional(number) + enabled = optional(bool) + replicas = optional(number) + max_item_memory = optional(number) + connection_limit = optional(number) })) })) @@ -395,16 +401,16 @@ variable "mimir" { variable "openobserve" { description = "List of OpenObserve instances to deploy" type = list(object({ - enable = bool - name = string - replicaCount = optional(number, 2) - min_cpu = optional(string, "500m") - max_cpu = optional(string, "1") - min_memory = optional(string, "512Mi") - max_memory = optional(string, "1Gi") + enable = bool + name = string + replicaCount = optional(number, 2) + min_cpu = optional(string, "500m") + max_cpu = optional(string, "1") + min_memory = optional(string, "512Mi") + max_memory = optional(string, "1Gi") enable_ingress = optional(bool, true) env = optional(list(object({ - name = string + name = string value = string })), []) })) diff --git a/observability/oci/issuer.tf b/observability/oci/issuer.tf index 2eb60210..d01925dd 100644 --- a/observability/oci/issuer.tf +++ b/observability/oci/issuer.tf @@ -1,8 +1,8 @@ resource "kubernetes_secret_v1" "namespace-cert-replicator" { for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null]) metadata { - name = 
"tls-secret-replica" - namespace = kubernetes_namespace.app_environments[each.key].metadata[0].name + name = "tls-secret-replica" + namespace = kubernetes_namespace.app_environments[each.key].metadata.0.name annotations = { "replicator.v1.mittwald.de/replicate-from" = "cert-manager/wildcard-dns" } diff --git a/observability/oci/main.tf b/observability/oci/main.tf index 43aeb955..1c33265f 100644 --- a/observability/oci/main.tf +++ b/observability/oci/main.tf @@ -1,39 +1,43 @@ locals { - cluster_name = var.cluster_name + access_secret = urlencode(var.access_secret) + access_key = urlencode(var.access_key) + cluster_name = var.cluster_name + cluster_name_parts = split("-", local.cluster_name) + environment = var.app_env == "" ? element(local.cluster_name_parts, length(local.cluster_name_parts) - 1) : var.app_env enable_loki = try(var.loki != null ? var.loki.enable : false, false) enable_tempo = try(var.tempo != null ? var.tempo.enable : false, false) enable_cortex = try(var.cortex != null ? var.cortex.enable : false, false) - enable_mimir = try(var.mimir != null ? var.mimir.enable : false, false) + enable_mimir = try(var.mimir != null ? var.mimir.enable : false,false) - enable_ingress_loki = local.enable_loki ? (var.loki.enable_ingress != null ? var.loki.enable_ingress : false) : false - enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false) : false - enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false) : false - enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false) : false + enable_ingress_loki = local.enable_loki ? (var.loki.enable_ingress != null ? var.loki.enable_ingress : false ) : false + enable_ingress_tempo = local.enable_tempo ? (var.tempo.enable_ingress != null ? var.tempo.enable_ingress : false ) : false + enable_ingress_mimir = local.enable_mimir ? (var.mimir.enable_ingress != null ? var.mimir.enable_ingress : false ) : false + enable_ingress_cortex = local.enable_cortex ? (var.cortex.enable_ingress != null ? var.cortex.enable_ingress : false ) : false app_namespaces = { - loki = local.enable_loki ? { + loki = local.enable_loki ? { services = ["loki-distributor:3100", "loki-querier:3100"] ingress = local.enable_ingress_loki } : null - tempo = local.enable_tempo ? { + tempo = local.enable_tempo ? { services = ["tempo-distributor:9411"] ingress = local.enable_ingress_tempo } : null - cortex = local.enable_cortex ? { + cortex = local.enable_cortex ? { services = ["cortex-distributor:8080"] ingress = local.enable_ingress_cortex } : null - mimir = local.enable_mimir ? { - services = ["mimir-distributor:8080"] - ingress = local.enable_ingress_mimir + mimir = local.enable_mimir ? { + services = ["mimir-distributor:8080"] + ingress = local.enable_ingress_mimir } : null } } resource "kubernetes_namespace" "app_environments" { - for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null]) + for_each = toset([for env in keys(local.app_namespaces) : env if env != "default" && local.app_namespaces[env] != null] ) metadata { name = each.key @@ -53,26 +57,26 @@ locals { service_name = split(":", service)[0] service_port = length(split(":", service)) != 2 ? 
80 : split(":", service)[1] # domain_name backward compatible with namespace based names if app_env is not given, if app_env is given then new scheme is chosen - domain_name = "${split(":", service)[0]}.${var.domain_name}" - ns = ns - ingress_name = "${split(":", service)[0]}-ingress" + domain_name = "${split(":", service)[0]}.${var.domain_name}" + ns = ns + ingress_name = "${split(":", service)[0]}-ingress" } if local.app_namespaces[ns].ingress == true } if local.app_namespaces[ns] != null ]...) } resource "kubernetes_ingress_v1" "service_ingress" { - for_each = { for service, value in local.services_list : service => value } + for_each = {for service, value in local.services_list : service => value} metadata { name = each.value.ingress_name namespace = each.value.ns annotations = merge( { - "kubernetes.io/ingress.class" = "nginx" + "kubernetes.io/ingress.class" = "nginx" }, each.value.ns == "mimir" && local.enable_mimir ? { "nginx.ingress.kubernetes.io/auth-type" = "basic" - "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" + "nginx.ingress.kubernetes.io/auth-secret" = "mimir-basic-auth" "nginx.ingress.kubernetes.io/auth-realm" = "Authentication Required" } : {} ) @@ -95,7 +99,7 @@ resource "kubernetes_ingress_v1" "service_ingress" { } } tls { - secret_name = "tls-secret-replica" + secret_name ="tls-secret-replica" hosts = ["*.${var.domain_name}"] } } diff --git a/observability/oci/vars.tf b/observability/oci/vars.tf index 8f484f13..5507b2ec 100644 --- a/observability/oci/vars.tf +++ b/observability/oci/vars.tf @@ -3,6 +3,12 @@ variable "app_name" { type = string } +variable "app_env" { + description = "Application deployment environment." + type = string + default = "" +} + variable "tenancy_namespace" { description = "Namespace of the tenancy" type = string @@ -24,6 +30,12 @@ variable "domain_name" { default = "" } +variable "hosted_zone" { + description = "Hosted zone name for the records" + type = string + default = "" +} + variable "access_key" { description = "OCI customer secrets access key to access the S3 storage buckets" type = string @@ -48,20 +60,20 @@ variable "observability_suffix" { variable "loki" { description = "Loki configuration for observability setup" type = object({ - enable = bool + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_lines_received = optional(string) - distributor_bytes_received = optional(number) + distributor_lines_received = optional(string) + distributor_bytes_received= optional(number) distributor_appended_failures = optional(number) - request_errors = optional(number) - panics = optional(number) - request_latency = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + request_errors = optional(number) + panics = optional(number) + request_latency = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) ingester = optional(object({ replicas = optional(number) @@ -117,29 +129,29 @@ variable "loki" { variable "cortex" { description = "Cortex configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = 
optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_series_per_metric = optional(number) - max_series_per_user = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_series_per_metric = optional(number) + max_series_per_user = optional(number) max_fetched_chunks_per_query = optional(number) })) query_range = optional(object({ memcached_client_timeout = optional(string) })) compactor = optional(object({ - enable = optional(bool) - replicas = optional(number) + enable = optional(bool) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -149,8 +161,8 @@ variable "cortex" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) @@ -177,7 +189,7 @@ variable "cortex" { })) query_frontend = optional(object({ replicas = optional(number) - enable = optional(bool) + enable = optional(bool) })) store_gateway = optional(object({ replication_factor = optional(number) @@ -236,23 +248,23 @@ variable "cortex" { variable "tempo" { description = "tempo configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - ingester_bytes_received = optional(number) - distributor_ingester_appends = optional(number) + ingester_bytes_received = optional(number) + distributor_ingester_appends = optional(number) distributor_ingester_append_failures = optional(number) - ingester_live_traces = optional(number) - distributor_spans_received = optional(number) - distributor_bytes_received = optional(number) - ingester_blocks_flushed = optional(number) - tempodb_blocklist = optional(number) - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) - query_frontend_replica = optional(number) - compactor_replica = optional(number) + ingester_live_traces = optional(number) + distributor_spans_received = optional(number) + distributor_bytes_received = optional(number) + ingester_blocks_flushed = optional(number) + tempodb_blocklist = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) + query_frontend_replica = optional(number) + compactor_replica = optional(number) })) max_receiver_msg_size = optional(number) ingester = optional(object({ @@ -280,10 +292,10 @@ variable "tempo" { cpu_utilization = optional(string) })) querier = optional(object({ - replicas = optional(number) + replicas = optional(number) })) query_frontend = optional(object({ - replicas = optional(number) + replicas = optional(number) })) metrics_generator = optional(object({ enable = optional(bool) @@ -291,9 +303,9 @@ variable "tempo" { service_graphs_max_items = optional(number) service_graphs_wait = optional(string) remote_write_flush_deadline = optional(string) - remote_write = optional(list(object({ - host = optional(string) - header = optional(object({ + 
remote_write = optional(list(object({ + host = optional(string) + header = optional(object({ key = optional(string) value = optional(string) })) @@ -305,25 +317,25 @@ variable "tempo" { variable "mimir" { description = "mimir configuration for observability setup" - type = object({ - enable = bool + type = object({ + enable = bool enable_ingress = optional(bool) alerts = optional(object({ - distributor_replica = optional(number) - ingester_replica = optional(number) - querier_replica = optional(number) + distributor_replica = optional(number) + ingester_replica = optional(number) + querier_replica = optional(number) query_frontend_replica = optional(number) - compactor_replica = optional(number) + compactor_replica = optional(number) })) limits = optional(object({ - ingestion_rate = optional(number) - ingestion_burst_size = optional(number) - max_fetched_chunks_per_query = optional(number) - max_cache_freshness = optional(number) + ingestion_rate = optional(number) + ingestion_burst_size = optional(number) + max_fetched_chunks_per_query = optional(number) + max_cache_freshness = optional(number) max_outstanding_requests_per_tenant = optional(number) })) compactor = optional(object({ - replicas = optional(number) + replicas = optional(number) persistence_volume = optional(object({ enable = optional(bool) size = optional(string) @@ -333,22 +345,22 @@ variable "mimir" { min_memory = optional(string) max_memory = optional(string) })) - ingester = optional(object({ - replicas = optional(number) + ingester = optional(object({ + replicas = optional(number) persistence_volume = optional(object({ size = optional(string) })) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) querier = optional(object({ - replicas = optional(number) - min_memory = optional(string) - max_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) + replicas = optional(number) + min_memory = optional(string) + max_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) })) query_frontend = optional(object({ replicas = optional(number) @@ -365,11 +377,11 @@ variable "mimir" { max_memory = optional(string) })) distributor = optional(object({ - replicas = optional(number) - min_memory = optional(string) - min_cpu = optional(string) - max_cpu = optional(string) - max_memory = optional(string) + replicas = optional(number) + min_memory = optional(string) + min_cpu = optional(string) + max_cpu = optional(string) + max_memory = optional(string) })) }) } \ No newline at end of file diff --git a/redis/aws-elasticache/main.tf b/redis/aws-elasticache/main.tf index ffbdb26f..805ece29 100644 --- a/redis/aws-elasticache/main.tf +++ b/redis/aws-elasticache/main.tf @@ -1,24 +1,25 @@ locals { - cluster_prefix = var.shared_services.cluster_prefix != null ? var.shared_services.cluster_prefix : "${var.provider_id}/${var.app_env}/${var.app_name}" - cluster_name = var.app_env != "" ? "${var.app_name}-${var.app_env}" : var.app_name - db_subnets_ids = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].all_outputs.db_subnets_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].all_outputs.db_subnets_id : module.remote_state_azure_cluster[0].all_outputs.db_subnets_id) - vpc_id = var.shared_services.type == "aws" ? 
module.remote_state_aws_cluster[0].vpc_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].vpc_id : module.remote_state_azure_cluster[0].vpc_id) + cluster_prefix = var.shared_services.cluster_prefix != null ? var.shared_services.cluster_prefix : "${var.provider_id}/${var.app_env}/${var.app_name}" + oidc_role = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].oidc_role : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].oidc_role : module.remote_state_azure_cluster[0].oidc_role) + cluster_name = var.app_env != "" ? "${var.app_name}-${var.app_env}" : "${var.app_name}" + db_subnets_ids = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].all_outputs.db_subnets_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].all_outputs.db_subnets_id : module.remote_state_azure_cluster[0].all_outputs.db_subnets_id) + vpc_id = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].vpc_id : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].vpc_id : module.remote_state_azure_cluster[0].vpc_id) } module "remote_state_gcp_cluster" { - source = "../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = local.cluster_prefix + source = "../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = local.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../remote-state/aws" - count = var.shared_services.type == "aws" ? 1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = local.cluster_prefix - location = var.shared_services.location + source = "../../remote-state/aws" + count = var.shared_services.type == "aws" ? 1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = local.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { @@ -40,7 +41,7 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -77,44 +78,44 @@ resource "aws_security_group" "redis_group" { # count specifies if `var.redis.num_node_groups` is greater than 1 it creates redis in cluster mode resource "aws_elasticache_replication_group" "redis_cluster" { - count = var.redis.num_node_groups > 1 ? 1 : 0 - multi_az_enabled = true - automatic_failover_enabled = true - at_rest_encryption_enabled = true - engine_version = var.redis.engine_version - security_group_ids = [aws_security_group.redis_group.id] - replication_group_id = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}-redis" - description = "redis replication group" - node_type = var.redis.node_type - parameter_group_name = "default.redis6.x.cluster.on" - port = 6379 - subnet_group_name = aws_elasticache_subnet_group.redis_subnets.name - replicas_per_node_group = var.redis.replicas_per_node_group - num_node_groups = var.redis.num_node_groups - - tags = var.tags - security_group_names = [] + count = var.redis.num_node_groups > 1 ? 
1 : 0 + multi_az_enabled = true + automatic_failover_enabled = true + at_rest_encryption_enabled = true + engine_version = var.redis.engine_version + security_group_ids = [aws_security_group.redis_group.id] + replication_group_id = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}-redis" + description = "redis replication group" + node_type = var.redis.node_type + parameter_group_name = "default.redis6.x.cluster.on" + port = 6379 + subnet_group_name = aws_elasticache_subnet_group.redis_subnets.name + replicas_per_node_group = var.redis.replicas_per_node_group + num_node_groups = var.redis.num_node_groups + + tags = var.tags + security_group_names = [] } # count specifies if `var.redis.num_node_groups` is not greater than 1 it creates redis in non cluster mode resource "aws_elasticache_replication_group" "redis" { count = var.redis.num_node_groups > 1 ? 0 : 1 - automatic_failover_enabled = true - multi_az_enabled = true - at_rest_encryption_enabled = true - engine_version = var.redis.engine_version - security_group_ids = [aws_security_group.redis_group.id] - replication_group_id = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}-redis" - description = "redis replication group" - node_type = var.redis.node_type - num_cache_clusters = var.redis.replicas_per_node_group - parameter_group_name = "default.redis6.x" - port = 6379 - subnet_group_name = aws_elasticache_subnet_group.redis_subnets.name - tags = var.tags - - security_group_names = [] + automatic_failover_enabled = true + multi_az_enabled = true + at_rest_encryption_enabled = true + engine_version = var.redis.engine_version + security_group_ids = [aws_security_group.redis_group.id] + replication_group_id = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}-redis" + description = "redis replication group" + node_type = var.redis.node_type + num_cache_clusters = var.redis.replicas_per_node_group + parameter_group_name = "default.redis6.x" + port = 6379 + subnet_group_name = aws_elasticache_subnet_group.redis_subnets.name + tags = var.tags + + security_group_names = [] } resource "aws_elasticache_subnet_group" "redis_subnets" { @@ -130,9 +131,9 @@ resource "kubernetes_service" "redis_service" { } spec { type = "ExternalName" - external_name = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].configuration_endpoint_address : aws_elasticache_replication_group.redis[0].primary_endpoint_address + external_name = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.configuration_endpoint_address : aws_elasticache_replication_group.redis.0.primary_endpoint_address port { - port = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].port : aws_elasticache_replication_group.redis[0].port + port = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.port : aws_elasticache_replication_group.redis.0.port } } } diff --git a/redis/aws-elasticache/outputs.tf b/redis/aws-elasticache/outputs.tf index d34623d8..7f3c06ac 100644 --- a/redis/aws-elasticache/outputs.tf +++ b/redis/aws-elasticache/outputs.tf @@ -1,25 +1,25 @@ output "elasticache_replication_group_redis_cluster" { - value = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].id : "" + value = var.redis.num_node_groups > 1 ? 
aws_elasticache_replication_group.redis_cluster[0].id: "" } output "elasticache_replication_group_redis" { - value = var.redis.num_node_groups > 1 ? "" : aws_elasticache_replication_group.redis[0].id + value = var.redis.num_node_groups > 1? "" : aws_elasticache_replication_group.redis[0].id } output "elasticache_subnet_group" { - value = aws_elasticache_subnet_group.redis_subnets.id + value = aws_elasticache_subnet_group.redis_subnets.id } output "redis" { value = var.redis != null ? { - instance_name = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].replication_group_id : aws_elasticache_replication_group.redis[0].replication_group_id - instance_url = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].configuration_endpoint_address : aws_elasticache_replication_group.redis[0].primary_endpoint_address - port = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].port : aws_elasticache_replication_group.redis[0].port - version = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].engine_version : aws_elasticache_replication_group.redis[0].engine_version - machine_type = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].node_type : aws_elasticache_replication_group.redis[0].node_type - cluster = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster[0].id : "" - security_group = aws_security_group.redis_group.id - redis_group = var.redis.num_node_groups > 1 ? "" : aws_elasticache_replication_group.redis[0].id + instance_name = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.replication_group_id : aws_elasticache_replication_group.redis.0.replication_group_id + instance_url = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.configuration_endpoint_address : aws_elasticache_replication_group.redis.0.primary_endpoint_address + port = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.port : aws_elasticache_replication_group.redis.0.port + version = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.engine_version : aws_elasticache_replication_group.redis.0.engine_version + machine_type = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.node_type : aws_elasticache_replication_group.redis.0.node_type + cluster = var.redis.num_node_groups > 1 ? aws_elasticache_replication_group.redis_cluster.0.id : "" + security_group = aws_security_group.redis_group.id + redis_group = var.redis.num_node_groups > 1 ? "" : aws_elasticache_replication_group.redis.0.id } : {} } \ No newline at end of file diff --git a/redis/azure-redis/main.tf b/redis/azure-redis/main.tf index ff79562a..be643564 100644 --- a/redis/azure-redis/main.tf +++ b/redis/azure-redis/main.tf @@ -1,22 +1,22 @@ locals { cluster_prefix = var.shared_services.cluster_prefix != null ? var.shared_services.cluster_prefix : var.app_name - cluster_name = var.app_env != "" ? "${var.app_name}-${var.app_env}" : var.app_name + cluster_name = var.app_env != "" ? "${var.app_name}-${var.app_env}" : "${var.app_name}" } module "remote_state_gcp_cluster" { - source = "../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = local.cluster_prefix + source = "../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 
1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = local.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../remote-state/aws" - count = var.shared_services.type == "aws" ? 1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = local.cluster_prefix - location = var.shared_services.location + source = "../../remote-state/aws" + count = var.shared_services.type == "aws" ? 1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = local.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { @@ -34,14 +34,19 @@ data "azurerm_kubernetes_cluster" "cluster" { } provider "kubernetes" { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) } data "azurerm_key_vault" "secrets" { - name = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].azurerm_key_vault_name : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].azurerm_key_vault_name : module.remote_state_azure_cluster[0].azurerm_key_vault_name) + name = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].azurerm_key_vault_name : (var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].azurerm_key_vault_name : module.remote_state_azure_cluster[0].azurerm_key_vault_name) + resource_group_name = var.resource_group_name +} + +data "azurerm_virtual_network" "avn" { + name = var.vpc resource_group_name = var.resource_group_name } @@ -58,12 +63,12 @@ resource "azurerm_redis_cache" "redis_cluster" { resource "kubernetes_service" "redis_service" { metadata { - name = var.redis.name != "" && var.redis.name != null ? "${var.redis.name}-${var.namespace}-redis" : "${var.namespace}-redis" - namespace = var.namespace + name = var.redis.name != "" && var.redis.name != null ? 
"${var.redis.name}-${var.namespace}-redis" : "${var.namespace}-redis" + namespace = var.namespace } spec { - type = "ExternalName" - external_name = azurerm_redis_cache.redis_cluster.hostname + type = "ExternalName" + external_name = azurerm_redis_cache.redis_cluster.hostname port { port = azurerm_redis_cache.redis_cluster.port } diff --git a/redis/azure-redis/vars.tf b/redis/azure-redis/vars.tf index 1f29f391..b327e2b2 100644 --- a/redis/azure-redis/vars.tf +++ b/redis/azure-redis/vars.tf @@ -10,21 +10,27 @@ variable "tags" { } variable "app_region" { - type = string + type = string description = "Location where the resources to be created" + default = "" +} + +variable "vpc" { + description = "VPC the apps are going to use" + type = string default = "" } variable "redis" { description = "Inputs to provision Redis instances in the cloud platform" - type = object( - { - name = optional(string) - sku_name = string - redis_cache_capacity = number - redis_cache_family = string - redis_enable_non_ssl_port = bool - }) + type = object( + { + name = optional(string) + sku_name = string + redis_cache_capacity = number + redis_cache_family = string + redis_enable_non_ssl_port = bool + }) default = { name = "" sku_name = "Basic" @@ -41,26 +47,26 @@ variable "namespace" { } variable "app_name" { - description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." - type = string + description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." + type = string } variable "shared_services" { type = object({ - type = string - bucket = string - profile = optional(string) - location = optional(string) - resource_group = optional(string) + type = string + bucket = string + profile = optional(string) + location = optional(string) + resource_group = optional(string) storage_account = optional(string) - container = optional(string) - cluster_prefix = optional(string) + container = optional(string) + cluster_prefix = optional(string) }) } variable "app_env" { description = "Env of the redis cluster created" - type = string - default = "" + type = string + default = "" } \ No newline at end of file diff --git a/redis/gcp-redis/main.tf b/redis/gcp-redis/main.tf index 5d1b3014..feadb2fe 100644 --- a/redis/gcp-redis/main.tf +++ b/redis/gcp-redis/main.tf @@ -1,23 +1,23 @@ locals { cluster_prefix = var.cluster_prefix != "" ? 
var.cluster_prefix : "${var.provider_id}/${var.app_env}/${var.app_name}" - cluster_name = "${var.app_name}-${var.app_env}" -} + cluster_name = "${var.app_name}-${var.app_env}" +} terraform { backend "gcs" {} } data "google_compute_network" "vpc" { - name = var.vpc + name = var.vpc } data "terraform_remote_state" "infra_output" { - backend = "gcs" + backend = "gcs" config = { - bucket = var.bucket_name - prefix = "${local.cluster_prefix}/terraform.tfstate" + bucket = var.bucket_name + prefix = "${local.cluster_prefix}/terraform.tfstate" } -} +} data "google_container_cluster" "gke" { name = data.terraform_remote_state.infra_output.outputs.cluster_name @@ -29,20 +29,20 @@ data "google_client_config" "default" {} provider "kubernetes" { host = "https://${data.terraform_remote_state.infra_output.outputs.kubernetes_endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key cluster_ca_certificate = base64decode(data.terraform_remote_state.infra_output.outputs.ca_certificate) } resource "google_compute_firewall" "redis-firewall" { - name = var.redis.name != "" && var.redis.name != null ? "${var.redis.name}-firewall" : "${local.cluster_name}-${var.namespace}-firewall" - network = data.google_compute_network.vpc.self_link + name = var.redis.name != "" && var.redis.name != null ? "${ var.redis.name}-firewall" : "${local.cluster_name}-${var.namespace}-firewall" + network = data.google_compute_network.vpc.self_link - direction = "INGRESS" + direction = "INGRESS" allow { - protocol = "tcp" - ports = ["6379"] + protocol = "tcp" + ports = ["6379"] } source_ranges = [] @@ -50,54 +50,54 @@ resource "google_compute_firewall" "redis-firewall" { # count specifies if `var.num_node_groups` is greater than 1 it creates redis in cluster mode resource "google_redis_instance" "redis_cluster" { - count = var.redis.replica_count > 1 ? 1 : 0 - provider = google-beta - project = var.provider_id - name = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}" - tier = var.redis.machine_type - memory_size_gb = var.redis.memory_size - connect_mode = var.redis.connect_mode - region = var.app_region - authorized_network = data.google_compute_network.vpc.self_link - redis_version = var.redis.redis_version - replica_count = var.redis.replica_count - read_replicas_mode = "READ_REPLICAS_ENABLED" - labels = var.labels + count = var.redis.replica_count > 1 ? 1 : 0 + provider = google-beta + project = var.provider_id + name = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}" + tier = var.redis.machine_type + memory_size_gb = var.redis.memory_size + connect_mode = var.redis.connect_mode + region = var.app_region + authorized_network = data.google_compute_network.vpc.self_link + redis_version = var.redis.redis_version + replica_count = var.redis.replica_count + read_replicas_mode = "READ_REPLICAS_ENABLED" + labels = var.labels } # count specifies if `var.replica_count` is not greater than 1 it creates redis in non cluster mode resource "google_redis_instance" "redis" { - count = var.redis.replica_count > 1 ? 
0 : 1 - provider = google-beta - project = var.provider_id - name = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}" - tier = var.redis.machine_type - memory_size_gb = var.redis.memory_size - connect_mode = var.redis.connect_mode - region = var.app_region - authorized_network = data.google_compute_network.vpc.self_link - redis_version = var.redis.redis_version - labels = var.labels + count = var.redis.replica_count > 1 ? 0 : 1 + provider = google-beta + project = var.provider_id + name = var.redis.name != "" && var.redis.name != null ? var.redis.name : "${local.cluster_name}-${var.namespace}" + tier = var.redis.machine_type + memory_size_gb = var.redis.memory_size + connect_mode = var.redis.connect_mode + region = var.app_region + authorized_network = data.google_compute_network.vpc.self_link + redis_version = var.redis.redis_version + labels = var.labels } resource "random_string" "redis_name_suffix" { - length = 16 - numeric = false - lower = true - upper = false - special = false + length = 16 + numeric = false + lower = true + upper = false + special = false } resource "kubernetes_service" "redis_service" { metadata { name = var.redis.name != "" && var.redis.name != null ? "${var.redis.name}-${var.namespace}-redis" : "${var.namespace}-redis" - namespace = var.namespace + namespace = var.namespace } spec { - type = "ExternalName" - external_name = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].host : google_redis_instance.redis[0].host + type = "ExternalName" + external_name = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster.0.host : google_redis_instance.redis.0.host port { - port = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].port : google_redis_instance.redis[0].port + port = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster.0.port : google_redis_instance.redis.0.port } } } \ No newline at end of file diff --git a/redis/gcp-redis/outputs.tf b/redis/gcp-redis/outputs.tf index 52890bdd..00bd902c 100644 --- a/redis/gcp-redis/outputs.tf +++ b/redis/gcp-redis/outputs.tf @@ -1,11 +1,11 @@ output "redis" { value = var.redis != null ? { - instance_name = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].name : google_redis_instance.redis[0].name - instance_url = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].host : google_redis_instance.redis[0].host - port = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].port : google_redis_instance.redis[0].port - version = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].redis_version : google_redis_instance.redis[0].redis_version - machine_type = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].tier : google_redis_instance.redis[0].tier - memory_size = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster[0].memory_size_gb : google_redis_instance.redis[0].memory_size_gb - firewall_name = google_compute_firewall.redis-firewall.name + instance_name = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster.0.name : google_redis_instance.redis.0.name + instance_url = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster.0.host : google_redis_instance.redis.0.host + port = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster.0.port : google_redis_instance.redis.0.port + version = var.redis.replica_count > 1 ? 
google_redis_instance.redis_cluster.0.redis_version : google_redis_instance.redis.0.redis_version + machine_type = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster.0.tier : google_redis_instance.redis.0.tier + memory_size = var.redis.replica_count > 1 ? google_redis_instance.redis_cluster.0.memory_size_gb : google_redis_instance.redis.0.memory_size_gb + firewall_name = google_compute_firewall.redis-firewall.name } : {} } \ No newline at end of file diff --git a/redis/oci-redis/kubernetes.tf b/redis/oci-redis/kubernetes.tf index 1076ea13..2d6f6ec0 100644 --- a/redis/oci-redis/kubernetes.tf +++ b/redis/oci-redis/kubernetes.tf @@ -1,29 +1,31 @@ locals { - cluster_prefix = var.shared_services.cluster_prefix != null ? var.shared_services.cluster_prefix : var.app_name + cluster_prefix = var.shared_services.cluster_prefix != null ? var.shared_services.cluster_prefix : var.app_name - cluster_id = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].cluster_uid : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].cluster_uid : module.remote_state_azure_cluster[0].cluster_uid + cluster_id = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].cluster_uid : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].cluster_uid : module.remote_state_azure_cluster[0].cluster_uid - cluster_public_endpoint = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].kubernetes_endpoint.public_endpoint : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].kubernetes_endpoint.public_endpoint : module.remote_state_azure_cluster[0].kubernetes_endpoint.public_endpoint + cluster_name = var.app_env == "" ? var.app_name : "${var.app_name}-${var.app_env}" - cluster_ca_certificate = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].ca_certificate : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].ca_certificate : module.remote_state_azure_cluster[0].ca_certificate + cluster_public_endpoint = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].kubernetes_endpoint.public_endpoint : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].kubernetes_endpoint.public_endpoint : module.remote_state_azure_cluster[0].kubernetes_endpoint.public_endpoint - db_subnets = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].db_subnets : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].db_subnets : module.remote_state_azure_cluster[0].db_subnets + cluster_ca_certificate = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].ca_certificate : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].ca_certificate : module.remote_state_azure_cluster[0].ca_certificate + + db_subnets = var.shared_services.type == "aws" ? module.remote_state_aws_cluster[0].db_subnets : var.shared_services.type == "gcp" ? module.remote_state_gcp_cluster[0].db_subnets : module.remote_state_azure_cluster[0].db_subnets } module "remote_state_gcp_cluster" { - source = "../../remote-state/gcp" - count = var.shared_services.type == "gcp" ? 1 : 0 - bucket_name = var.shared_services.bucket - bucket_prefix = local.cluster_prefix + source = "../../remote-state/gcp" + count = var.shared_services.type == "gcp" ? 
1 : 0 + bucket_name = var.shared_services.bucket + bucket_prefix = local.cluster_prefix } module "remote_state_aws_cluster" { - source = "../../remote-state/aws" - count = var.shared_services.type == "aws" ? 1 : 0 - bucket_name = var.shared_services.bucket - provider_id = var.shared_services.profile - bucket_prefix = local.cluster_prefix - location = var.shared_services.location + source = "../../remote-state/aws" + count = var.shared_services.type == "aws" ? 1 : 0 + bucket_name = var.shared_services.bucket + provider_id = var.shared_services.profile + bucket_prefix = local.cluster_prefix + location = var.shared_services.location } module "remote_state_azure_cluster" { diff --git a/redis/oci-redis/vars.tf b/redis/oci-redis/vars.tf index dde98ae5..01bdcfc4 100644 --- a/redis/oci-redis/vars.tf +++ b/redis/oci-redis/vars.tf @@ -1,6 +1,6 @@ variable "app_name" { - description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." - type = string + description = "This is the name of the cluster. This name is also used to namespace all the other resources created by this module." + type = string } variable "namespace" { @@ -9,33 +9,39 @@ variable "namespace" { } variable "app_region" { - type = string + type = string description = "Location where the resources to be created" - default = "" + default = "" +} + +variable "app_env" { + description = "Application deployment environment." + type = string + default = "" } variable "provider_id" { description = "OCI Compartment ID" - type = string + type = string } variable "redis" { description = "Inputs to provision Redis instances in the cloud platform" - type = object( + type = object( { - name = string - node_count = number - memory_size = number - cluster_mode = optional(string) - redis_version = optional(string) + name = string + node_count = number + memory_size = number + cluster_mode = optional(string) + redis_version = optional(string) } ) default = { - name = "" - node_count = 2 - memory_size = 5 - cluster_mode = "SHARDED" - redis_version = "REDIS_7_0" + name = "" + node_count = 2 + memory_size = 5 + cluster_mode = "SHARDED" + redis_version = "REDIS_7_0" } validation { @@ -51,13 +57,13 @@ variable "redis" { variable "shared_services" { type = object({ - type = string - bucket = string - profile = optional(string) - location = optional(string) - resource_group = optional(string) + type = string + bucket = string + profile = optional(string) + location = optional(string) + resource_group = optional(string) storage_account = optional(string) - container = optional(string) - cluster_prefix = optional(string) + container = optional(string) + cluster_prefix = optional(string) }) } \ No newline at end of file diff --git a/sql/aws-rds/main.tf b/sql/aws-rds/main.tf index d9588add..dff49828 100644 --- a/sql/aws-rds/main.tf +++ b/sql/aws-rds/main.tf @@ -1,3 +1,4 @@ +data "aws_availability_zones" "available" {} locals { enable_ssl = var.enable_ssl == true ? 
1 : 0 @@ -8,8 +9,8 @@ locals { enabled_cloudwatch_logs_exports = ["postgresql", "upgrade"] version = var.postgresql_engine_version port = 5432 - parameter_group = { - key = "postgres16" + parameter_group = { + key = "postgres16" values = [ { name = "log_connections" @@ -25,18 +26,18 @@ locals { }, { name = "rds.force_ssl" - value = local.enable_ssl + value = "${local.enable_ssl}" } ] } } - "mysql" = { + "mysql" = { type = "mysql" enabled_cloudwatch_logs_exports = ["error", "slowquery"] version = var.mysql_engine_version port = 3306 - parameter_group = { - key = "mysql8.0" + parameter_group = { + key = "mysql8.0" values = [ { name = "character_set_server" @@ -63,12 +64,12 @@ locals { resource "aws_db_subnet_group" "db_subnet" { name = "${var.rds_name}-sg" - subnet_ids = var.db_subnets + subnet_ids = var.db_subnets tags = merge(var.tags, - tomap({ - "Name" = var.rds_name - }) + tomap({ + "Name" = var.rds_name + }) ) } @@ -91,9 +92,9 @@ resource "aws_security_group" "rds" { } tags = merge(var.tags, - tomap({ - "Name" = var.rds_name - }) + tomap({ + "Name" = var.rds_name + }) ) } @@ -110,9 +111,9 @@ resource "aws_db_parameter_group" "db_param_group" { } tags = merge(var.tags, - tomap({ - "Name" = var.rds_name - }) + tomap({ + "Name" = var.rds_name + }) ) } @@ -140,19 +141,19 @@ resource "aws_db_instance" "db_instance" { storage_type = var.storage_tier auto_minor_version_upgrade = var.auto_minor_version_upgrade tags = merge(var.tags, - tomap({ - "Name" = var.rds_name - }) + tomap({ + "Name" = var.rds_name + }) ) } resource "aws_db_instance" "rds_read_replica" { - count = var.read_replica ? 1 : 0 + count = var.read_replica ? 1 : 0 identifier = "rds-read-replica-${var.rds_name}" publicly_accessible = false storage_encrypted = true - instance_class = var.instance_class + instance_class = var.instance_class replicate_source_db = aws_db_instance.db_instance.id allocated_storage = var.allocated_storage vpc_security_group_ids = [aws_security_group.rds.id] @@ -167,8 +168,8 @@ resource "aws_db_instance" "rds_read_replica" { storage_type = var.storage_tier auto_minor_version_upgrade = var.auto_minor_version_upgrade tags = merge(var.tags, - tomap({ - "Name" = "rds-read-replica-${var.rds_name}" - }) + tomap({ + "Name" = "rds-read-replica-${var.rds_name}" + }) ) } \ No newline at end of file diff --git a/sql/aws-rds/outputs.tf b/sql/aws-rds/outputs.tf index aa10d733..3d7e705f 100644 --- a/sql/aws-rds/outputs.tf +++ b/sql/aws-rds/outputs.tf @@ -20,7 +20,7 @@ output "db_name" { } output "rds_read_replica_db_url" { - value = element(concat(aws_db_instance.rds_read_replica[*].endpoint, [""]), 0) + value = element(concat(aws_db_instance.rds_read_replica.*.endpoint, [""]), 0) } output "db_type" { @@ -40,5 +40,5 @@ output "db_instance_class" { } output "db_user" { - value = { for k, v in local.db_map : k => v.user } + value = { for k,v in local.db_map : k => v.user} } \ No newline at end of file diff --git a/sql/aws-rds/vars.tf b/sql/aws-rds/vars.tf index d83a297a..8b6c8637 100644 --- a/sql/aws-rds/vars.tf +++ b/sql/aws-rds/vars.tf @@ -16,6 +16,12 @@ variable "db_subnets" { default = [] } +variable "aws_region" { + description = "The AWS region to deploy to (e.g. 
us-east-1)" + type = string + default = "" +} + variable "cluster_name" { description = "Name of the cluster to which RDS instance is attached with" type = string @@ -91,6 +97,12 @@ variable "max_allocated_storage" { default = 0 } +variable "monitoring_interval" { + description = "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 5. Valid Values: 0, 1, 5, 10, 15, 30, 60." + default = 0 + type = number +} + variable "log_min_duration_statement" { description = "Sets the minimum execution time above which all statements will be logged." type = number diff --git a/sql/azure-mysql/vars.tf b/sql/azure-mysql/vars.tf index 07018749..88586f69 100644 --- a/sql/azure-mysql/vars.tf +++ b/sql/azure-mysql/vars.tf @@ -22,6 +22,12 @@ variable "administrator_login" { default = "mysqladmin" } +variable "administrator_password" { + description = "The admin password for mysql database" + type = string + default = "" +} + variable "mysql_version" { description = "Version of the mysql database" type = string @@ -92,11 +98,11 @@ variable "storage" { type = number default = 20 validation { - condition = (var.storage >= 20) + condition = (var.storage >= 20 ) error_message = "Storage value must be greater than or equal to 20." } validation { - condition = (var.storage <= 16384) + condition = (var.storage <= 16384 ) error_message = "Storage value must be less than or equal to 16384." } } @@ -112,11 +118,11 @@ variable "iops" { type = number default = 360 validation { - condition = (var.iops >= 360) + condition = (var.iops >= 360 ) error_message = "IOPS value must be greater than or equal to 360." } validation { - condition = (var.iops <= 20000) + condition = (var.iops <= 20000 ) error_message = "IOPS value must be less than or equal to 20000." } } diff --git a/sql/azure-postgres/vars.tf b/sql/azure-postgres/vars.tf index ea44c6f3..41cba224 100644 --- a/sql/azure-postgres/vars.tf +++ b/sql/azure-postgres/vars.tf @@ -88,8 +88,14 @@ variable "namespace" { } variable "key_vault_id" { - type = string - default = "" + type = string + default = "" +} + +variable "zone" { + description = "zone for resources" + type = number + default = 2 } variable "enable_ssl" { @@ -103,11 +109,11 @@ variable "storage_mb" { type = number default = 32768 validation { - condition = (var.storage_mb >= 32768) + condition = (var.storage_mb >= 32768 ) error_message = "Storage value must be greater than or equal to 20." } validation { - condition = (var.storage_mb <= 33553408) + condition = (var.storage_mb <= 33553408 ) error_message = "Storage value must be less than or equal to 16384." } } diff --git a/sql/gcp-sql/main.tf b/sql/gcp-sql/main.tf index 60481564..df9d068c 100644 --- a/sql/gcp-sql/main.tf +++ b/sql/gcp-sql/main.tf @@ -1,47 +1,47 @@ locals { db_type = { "postgresql" = { - type = "postgres" - enabled_cloudwatch_logs_exports = ["postgresql", "upgrade"] - version = var.sql_version != "" ? var.sql_version : "POSTGRES_14" - port = 5432 + type = "postgres" + enabled_cloudwatch_logs_exports = ["postgresql", "upgrade"] + version = var.sql_version != "" ? var.sql_version : "POSTGRES_14" + port = 5432 } "mysql" = { - type = "mysql" - enabled_cloudwatch_logs_exports = ["error", "slowquery"] - version = var.sql_version != "" ? var.sql_version : "MYSQL_8_0" - port = 3306 + type = "mysql" + enabled_cloudwatch_logs_exports = ["error", "slowquery"] + version = var.sql_version != "" ? 
var.sql_version : "MYSQL_8_0" + port = 3306 } } } resource "google_compute_firewall" "sql-ingress-firewall" { - name = var.multi_ds ? "${var.app_uid}-${var.sql_name}-ingress" : "${var.app_uid}-ingress" - description = "${var.sql_name}-ingress-firewall" - network = var.vpc_name + name = var.multi_ds ? "${var.app_uid}-${var.sql_name}-ingress" : "${var.app_uid}-ingress" + description = "${var.sql_name}-ingress-firewall" + network = var.vpc_name - direction = "INGRESS" + direction = "INGRESS" allow { - protocol = "tcp" - ports = [local.db_type[var.sql_type].port] + protocol = "tcp" + ports = ["${local.db_type[var.sql_type].port}"] } - source_ranges = var.ext_rds_sg_cidr_block + source_ranges = var.ext_rds_sg_cidr_block } resource "google_compute_firewall" "sql-egress-firewall" { - name = var.multi_ds ? "${var.app_uid}-${var.sql_name}-egress" : "${var.app_uid}-egress" - network = var.vpc_name + name = var.multi_ds ? "${var.app_uid}-${var.sql_name}-egress" : "${var.app_uid}-egress" + network = var.vpc_name - direction = "EGRESS" + direction = "EGRESS" allow { - protocol = "tcp" - ports = [local.db_type[var.sql_type].port] + protocol = "tcp" + ports = ["${local.db_type[var.sql_type].port}"] } - source_ranges = var.ext_rds_sg_cidr_block + source_ranges = var.ext_rds_sg_cidr_block } resource "google_sql_database_instance" "postgres_sql_db" { @@ -56,23 +56,23 @@ resource "google_sql_database_instance" "postgres_sql_db" { deletion_protection = var.deletion_protection settings { - tier = var.machine_type - activation_policy = var.activation_policy - disk_size = var.disk_size - disk_type = var.disk_type - availability_type = var.availability_type - user_labels = var.labels + tier = var.machine_type + activation_policy = var.activation_policy + disk_size = var.disk_size + disk_type = var.disk_type + availability_type = var.availability_type + user_labels = var.labels deletion_protection_enabled = var.deletion_protection ip_configuration { - ipv4_enabled = false - private_network = var.vpc_name - require_ssl = var.enable_ssl + ipv4_enabled = false + private_network = var.vpc_name + require_ssl = var.enable_ssl } backup_configuration { - enabled = var.availability_type == "REGIONAL" ? true : false - point_in_time_recovery_enabled = var.availability_type == "REGIONAL" ? true : false + enabled = var.availability_type == "REGIONAL" ? true : false + point_in_time_recovery_enabled = var.availability_type == "REGIONAL" ? true : false } } @@ -90,35 +90,35 @@ resource "google_sql_database_instance" "sql_db" { deletion_protection = var.deletion_protection settings { - tier = var.machine_type - activation_policy = var.activation_policy - disk_size = var.disk_size - disk_type = var.disk_type - availability_type = var.availability_type - user_labels = var.labels + tier = var.machine_type + activation_policy = var.activation_policy + disk_size = var.disk_size + disk_type = var.disk_type + availability_type = var.availability_type + user_labels = var.labels deletion_protection_enabled = var.deletion_protection ip_configuration { - ipv4_enabled = false - private_network = var.vpc_name - require_ssl = var.enable_ssl + ipv4_enabled = false + private_network = var.vpc_name + require_ssl = var.enable_ssl } backup_configuration { - binary_log_enabled = var.availability_type == "REGIONAL" ? true : false - enabled = var.availability_type == "REGIONAL" ? true : false + binary_log_enabled = var.availability_type == "REGIONAL" ? true : false + enabled = var.availability_type == "REGIONAL" ? 
true : false } } } resource "google_sql_database" "sql_database" { - for_each = local.db_map - name = each.value.db_name - project = var.project_id - instance = var.sql_type == "postgresql" ? google_sql_database_instance.postgres_sql_db[0].name : google_sql_database_instance.sql_db[0].name - charset = "UTF8" - collation = var.db_collation + for_each = local.db_map + name = each.value.db_name + project = var.project_id + instance = var.sql_type == "postgresql" ? google_sql_database_instance.postgres_sql_db[0].name : google_sql_database_instance.sql_db[0].name + charset = "UTF8" + collation = var.db_collation lifecycle { ignore_changes = [ charset, @@ -137,36 +137,36 @@ resource "google_sql_user" "sql_user" { #REPLICATION resource "google_sql_database_instance" "sql_db_replica" { - count = var.read_replica ? 1 : 0 - provider = google-beta - name = "read-replica-${var.sql_name}" - project = var.project_id - region = var.region - database_version = local.db_type[var.sql_type].version - deletion_protection = var.deletion_protection - master_instance_name = var.sql_type == "postgresql" ? google_sql_database_instance.postgres_sql_db[0].name : google_sql_database_instance.sql_db[0].name + count = var.read_replica ? 1 : 0 + provider = google-beta + name = "read-replica-${var.sql_name}" + project = var.project_id + region = var.region + database_version = local.db_type[var.sql_type].version + deletion_protection = var.deletion_protection + master_instance_name = var.sql_type == "postgresql" ? google_sql_database_instance.postgres_sql_db[0].name : google_sql_database_instance.sql_db[0].name settings { - tier = var.machine_type - activation_policy = var.activation_policy - disk_type = var.disk_type - availability_type = "ZONAL" - user_labels = var.labels + tier = var.machine_type + activation_policy = var.activation_policy + disk_type = var.disk_type + availability_type = "ZONAL" + user_labels = var.labels deletion_protection_enabled = var.deletion_protection } } resource "google_sql_ssl_cert" "postgresql_db_cert" { - count = var.sql_type == "postgresql" && var.enable_ssl ? 1 : 0 - common_name = "${google_sql_database_instance.postgres_sql_db[0].name}_${var.sql_type}_ssl_certificates" - instance = google_sql_database_instance.postgres_sql_db[0].name - depends_on = [google_sql_database_instance.postgres_sql_db] + count = var.sql_type == "postgresql" && var.enable_ssl ? 1 : 0 + common_name = "${google_sql_database_instance.postgres_sql_db[0].name}_${var.sql_type}_ssl_certificates" + instance = google_sql_database_instance.postgres_sql_db[0].name + depends_on = [google_sql_database_instance.postgres_sql_db] } resource "google_sql_ssl_cert" "sql_db_cert" { - count = var.sql_type == "mysql" && var.enable_ssl ? 1 : 0 - common_name = "${google_sql_database_instance.sql_db[0].name}_${var.sql_type}_ssl_certificates" - instance = google_sql_database_instance.sql_db[0].name - depends_on = [google_sql_database_instance.sql_db] + count = var.sql_type == "mysql" && var.enable_ssl ? 1 : 0 + common_name = "${google_sql_database_instance.sql_db[0].name}_${var.sql_type}_ssl_certificates" + instance = google_sql_database_instance.sql_db[0].name + depends_on = [google_sql_database_instance.sql_db] } \ No newline at end of file diff --git a/sql/gcp-sql/outputs.tf b/sql/gcp-sql/outputs.tf index d7722822..7d95128b 100644 --- a/sql/gcp-sql/outputs.tf +++ b/sql/gcp-sql/outputs.tf @@ -20,7 +20,7 @@ output "db_name" { } output "read_replica_db_url" { - value = var.read_replica == true ? 
google_sql_database_instance.sql_db_replica[0].connection_name : null + value = var.read_replica == true ? google_sql_database_instance.sql_db_replica.0.connection_name : null } output "db_type" { @@ -40,5 +40,5 @@ output "db_tier" { } output "db_user" { - value = { for k, v in local.db_map : k => v.user } + value = { for k,v in local.db_map : k => v.user} } diff --git a/sql/gcp-sql/vars.tf b/sql/gcp-sql/vars.tf index 8a467298..849f57ff 100644 --- a/sql/gcp-sql/vars.tf +++ b/sql/gcp-sql/vars.tf @@ -79,12 +79,25 @@ variable "activation_policy" { default = "ALWAYS" } +variable "authorized_networks" { + description = "A list of authorized CIDR-formatted IP address ranges that can connect to this DB. Only applies to public IP instances." + type = list(map(string)) + default = [] + +} + variable "availability_type" { description = "The availability type of the Cloud SQL instance, high availability (REGIONAL) or single zone (ZONAL)" type = string default = "ZONAL" } +variable "disk_autoresize" { + description = "Second Generation only. Configuration to increase storage size automatically." + type = bool + default = false +} + variable "disk_size" { description = "Second generation only. The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased." type = number @@ -97,6 +110,17 @@ variable "disk_type" { default = "PD_SSD" } +variable "require_ssl" { + description = "True if the instance should require SSL/TLS for users connecting over IP. Note: SSL/TLS is needed to provide security when you connect to Cloud SQL using IP addresses. If you are connecting to your instance only by using the Cloud SQL Proxy or the Java Socket Library, you do not need to configure your instance to use SSL/TLS." + type = bool + default = false +} + +variable "private_network" { + description = "The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP." + type = string + default = null +} variable "read_replica" { @@ -105,6 +129,20 @@ variable "read_replica" { default = false } +variable "num_read_replicas" { + description = "The number of read replicas to create. Cloud SQL will replicate all data from the master to these replicas, which you can use to horizontally scale read traffic." + type = number + default = 0 +} + +variable "read_replica_zones" { + description = "A list of compute zones where read replicas should be created. 
List size should match 'num_read_replicas'" + type = list(string) + default = [] + + # Example: + # default = ["us-central1-b", "us-central1-c"] +} variable "deletion_protection" { @@ -122,9 +160,9 @@ variable "ext_rds_sg_cidr_block" { variable "db_collation" { description = "Collation to be used while creating the DB" type = string - default = "en_US.UTF8" + default = "en_US.UTF8" } - + variable "labels" { description = "Common Labels on the resources" type = map(string) diff --git a/sql/oci-mysql/secrets.tf b/sql/oci-mysql/secrets.tf index 9ded89bd..818e2eea 100644 --- a/sql/oci-mysql/secrets.tf +++ b/sql/oci-mysql/secrets.tf @@ -21,52 +21,52 @@ resource "random_password" "db_admin_password" { resource "random_string" "mysql_username" { length = 6 special = false - for_each = { for v in var.databases : v => v if v != null } + for_each = {for v in var.databases : v => v if v != null} } resource "random_password" "mysql_editor_password" { - length = 16 - special = true - upper = true - lower = true - numeric = true - min_special = 2 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - for_each = local.db_map + length = 16 + special = true + upper = true + lower = true + numeric = true + min_special = 2 + min_upper = 2 + min_lower = 2 + min_numeric = 2 + for_each = local.db_map } resource "random_password" "mysql_reader_password" { - length = 16 - special = true - upper = true - lower = true - numeric = true - min_special = 2 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - for_each = local.db_map + length = 16 + special = true + upper = true + lower = true + numeric = true + min_special = 2 + min_upper = 2 + min_lower = 2 + min_numeric = 2 + for_each = local.db_map } resource "oci_vault_secret" "admin_password_secret" { compartment_id = var.provider_id secret_name = "${var.mysql_db_system_name}-mysql-db-secret" secret_content { - content_type = "base64" - content = base64encode(random_password.db_admin_password.result) + content_type = "base64" + content = base64encode(random_password.db_admin_password.result) } - key_id = var.key_id - vault_id = var.vault_id + key_id = var.key_id + vault_id = var.vault_id } resource "oci_vault_secret" "mysql_db_user_secret" { - for_each = local.db_map - compartment_id = var.provider_id - secret_name = "${var.mysql_db_system_name}-${replace(each.key, "_", "-")}-mysql-db-user" - vault_id = var.vault_id - key_id = var.key_id + for_each = local.db_map + compartment_id = var.provider_id + secret_name = "${var.mysql_db_system_name}-${replace(each.key, "_", "-")}-mysql-db-user" + vault_id = var.vault_id + key_id = var.key_id secret_content { content_type = "BASE64" @@ -75,11 +75,11 @@ resource "oci_vault_secret" "mysql_db_user_secret" { } resource "oci_vault_secret" "mysql_db_editor_secret" { - for_each = local.db_map - compartment_id = var.provider_id - secret_name = "${var.mysql_db_system_name}-${replace(each.key, "_", "-")}-mysql-db" - vault_id = var.vault_id - key_id = var.key_id + for_each = local.db_map + compartment_id = var.provider_id + secret_name = "${var.mysql_db_system_name}-${replace(each.key, "_", "-")}-mysql-db" + vault_id = var.vault_id + key_id = var.key_id secret_content { content_type = "BASE64" @@ -88,11 +88,11 @@ resource "oci_vault_secret" "mysql_db_editor_secret" { } resource "oci_vault_secret" "mysql_db_readonly_secret" { - for_each = local.db_map - compartment_id = var.provider_id - secret_name = "${var.mysql_db_system_name}-${replace(each.key, "_", "-")}-mysql-readonly" - vault_id = var.vault_id - key_id = var.key_id + for_each = 
local.db_map + compartment_id = var.provider_id + secret_name = "${var.mysql_db_system_name}-${replace(each.key, "_", "-")}-mysql-readonly" + vault_id = var.vault_id + key_id = var.key_id secret_content { content_type = "BASE64" @@ -104,6 +104,11 @@ data "oci_secrets_secretbundle" "admin_password_bundle" { secret_id = oci_vault_secret.admin_password_secret.id } +data "oci_secrets_secretbundle" "mysql_db_user_secret" { + for_each = local.db_map + secret_id = oci_vault_secret.mysql_db_user_secret[each.key].id +} + data "oci_secrets_secretbundle" "mysql_db_editor_secret" { for_each = local.db_map secret_id = oci_vault_secret.mysql_db_editor_secret[each.key].id diff --git a/sql/oci-mysql/vars.tf b/sql/oci-mysql/vars.tf index 73ef83ed..ca959d97 100644 --- a/sql/oci-mysql/vars.tf +++ b/sql/oci-mysql/vars.tf @@ -1,21 +1,21 @@ variable "subnet_id" { description = "Subnet id to host the mysql database" - type = string + type = string } variable "provider_id" { description = "OCI Compartment ID" - type = string + type = string } variable "mysql_shape_name" { description = "Shape of the mysql instance" - type = string + type = string } variable "availability_domain" { description = "Availability domain to install the mysql instance" - type = string + type = string } variable "administrator_login" { @@ -24,24 +24,29 @@ variable "administrator_login" { default = "mysqladmin" } +variable "cluster_name" { + description = "Name of the oke cluster" + type = string +} + variable "namespace" { description = "Namespace for the mysql service" - type = string + type = string } variable "vault_id" { description = "Kms vault id for vault secret" - type = string + type = string } variable "key_id" { description = "Kms key id for vault secret" - type = string + type = string } variable "mysql_db_system_name" { description = "Name of the Mysql db system" - type = string + type = string } variable "storage" { @@ -73,6 +78,11 @@ variable "backup_retention_days" { default = 7 } +variable "tags" { + description = "Tags for oci resources" + type = map(any) +} + variable "read_replica" { description = "Whether to enable the read replica" type = bool diff --git a/sql/oci-postgres/secrets.tf b/sql/oci-postgres/secrets.tf index 8bf5033d..382cfc91 100644 --- a/sql/oci-postgres/secrets.tf +++ b/sql/oci-postgres/secrets.tf @@ -20,52 +20,52 @@ resource "random_password" "db_admin_password" { resource "random_string" "postgres_username" { length = 6 special = false - for_each = { for v in var.databases : v => v if v != null } + for_each = {for v in var.databases : v => v if v != null} } resource "random_password" "postgres_editor_password" { - length = 16 - special = true - upper = true - lower = true - numeric = true - min_special = 2 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - for_each = local.db_map + length = 16 + special = true + upper = true + lower = true + numeric = true + min_special = 2 + min_upper = 2 + min_lower = 2 + min_numeric = 2 + for_each = local.db_map } resource "random_password" "postgres_reader_password" { - length = 16 - special = true - upper = true - lower = true - numeric = true - min_special = 2 - min_upper = 2 - min_lower = 2 - min_numeric = 2 - for_each = local.db_map + length = 16 + special = true + upper = true + lower = true + numeric = true + min_special = 2 + min_upper = 2 + min_lower = 2 + min_numeric = 2 + for_each = local.db_map } resource "oci_vault_secret" "admin_password_secret" { compartment_id = var.provider_id secret_name = "${var.postgres_db_system_name}-postgres-db-secret" 
secret_content { - content_type = "base64" - content = base64encode(random_password.db_admin_password.result) + content_type = "base64" + content = base64encode(random_password.db_admin_password.result) } - key_id = var.key_id - vault_id = var.vault_id + key_id = var.key_id + vault_id = var.vault_id } resource "oci_vault_secret" "postgres_db_user_secret" { - for_each = local.db_map - compartment_id = var.provider_id - secret_name = "${var.postgres_db_system_name}-${replace(each.key, "_", "-")}-postgres-user" - vault_id = var.vault_id - key_id = var.key_id + for_each = local.db_map + compartment_id = var.provider_id + secret_name = "${var.postgres_db_system_name}-${replace(each.key, "_", "-")}-postgres-user" + vault_id = var.vault_id + key_id = var.key_id secret_content { content_type = "BASE64" @@ -74,11 +74,11 @@ resource "oci_vault_secret" "postgres_db_user_secret" { } resource "oci_vault_secret" "postgres_db_editor_secret" { - for_each = local.db_map - compartment_id = var.provider_id - secret_name = "${var.postgres_db_system_name}-${replace(each.key, "_", "-")}-postgres-db" - vault_id = var.vault_id - key_id = var.key_id + for_each = local.db_map + compartment_id = var.provider_id + secret_name = "${var.postgres_db_system_name}-${replace(each.key, "_", "-")}-postgres-db" + vault_id = var.vault_id + key_id = var.key_id secret_content { content_type = "BASE64" @@ -87,11 +87,11 @@ resource "oci_vault_secret" "postgres_db_editor_secret" { } resource "oci_vault_secret" "postgres_db_readonly_secret" { - for_each = local.db_map - compartment_id = var.provider_id - secret_name = "${var.postgres_db_system_name}-${replace(each.key, "_", "-")}-postgres-readonly" - vault_id = var.vault_id - key_id = var.key_id + for_each = local.db_map + compartment_id = var.provider_id + secret_name = "${var.postgres_db_system_name}-${replace(each.key, "_", "-")}-postgres-readonly" + vault_id = var.vault_id + key_id = var.key_id secret_content { content_type = "BASE64" @@ -103,6 +103,11 @@ data "oci_secrets_secretbundle" "admin_password_bundle" { secret_id = oci_vault_secret.admin_password_secret.id } +data "oci_secrets_secretbundle" "postgres_db_user_secret" { + for_each = local.db_map + secret_id = oci_vault_secret.postgres_db_user_secret[each.key].id +} + data "oci_secrets_secretbundle" "postgres_db_editor_secret" { for_each = local.db_map secret_id = oci_vault_secret.postgres_db_editor_secret[each.key].id diff --git a/sql/oci-postgres/vars.tf b/sql/oci-postgres/vars.tf index 6c47a7d0..cabb8d9a 100644 --- a/sql/oci-postgres/vars.tf +++ b/sql/oci-postgres/vars.tf @@ -24,6 +24,11 @@ variable "administrator_login" { default = "postgresadmin" } +variable "cluster_name" { + description = "Name of the OKE cluster" + type = string +} + variable "namespace" { description = "Namespace for the PostgreSQL service" type = string @@ -44,6 +49,11 @@ variable "postgres_db_system_name" { type = string } +variable "tags" { + description = "Tags for OCI resources" + type = map(any) +} + variable "databases" { description = "Specifies the name of the PostgreSQL Database" type = list(string) @@ -61,23 +71,23 @@ variable "iops" { type = number default = 75000 validation { - condition = (var.iops >= 75000) + condition = (var.iops >= 75000 ) error_message = "IOPS value must be greater than or equal to 75000." } validation { - condition = (var.iops <= 750000) + condition = (var.iops <= 750000 ) error_message = "IOPS value must be less than or equal to 750000." 
} } variable "system_type" { - description = "System type of the Postgres database" - type = string - default = "OCI_OPTIMIZED_STORAGE" + description = "System type of the Postgres database" + type = string + default = "OCI_OPTIMIZED_STORAGE" } variable "instance_count" { - description = "Count of the instance" - type = number - default = 1 + description = "Count of the instance" + type = number + default = 1 } \ No newline at end of file diff --git a/zop-system/aws/kubernetes.tf b/zop-system/aws/kubernetes.tf index 5c7e2471..ef70e0cf 100644 --- a/zop-system/aws/kubernetes.tf +++ b/zop-system/aws/kubernetes.tf @@ -8,7 +8,7 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -16,7 +16,7 @@ provider "kubernetes" { provider "kubectl" { load_config_file = false host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } @@ -24,7 +24,7 @@ provider "kubectl" { provider "helm" { kubernetes { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) token = data.aws_eks_cluster_auth.cluster.token } } \ No newline at end of file diff --git a/zop-system/azure/kubernetes.tf b/zop-system/azure/kubernetes.tf index 662b2a9e..82365050 100644 --- a/zop-system/azure/kubernetes.tf +++ b/zop-system/azure/kubernetes.tf @@ -4,26 +4,26 @@ data "azurerm_kubernetes_cluster" "cluster" { } provider "kubectl" { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) - token = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) + token = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host load_config_file = false } provider "kubernetes" { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = 
data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) } provider "helm" { kubernetes { - host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].host - client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_certificate) - client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].client_key) - cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config[0].cluster_ca_certificate) + host = data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.host + client_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_certificate) + client_key = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.client_key) + cluster_ca_certificate = base64decode(data.azurerm_kubernetes_cluster.cluster.kube_admin_config.0.cluster_ca_certificate) } } \ No newline at end of file diff --git a/zop-system/gcp/kubernetes.tf b/zop-system/gcp/kubernetes.tf index dd6e126d..3eee3a42 100644 --- a/zop-system/gcp/kubernetes.tf +++ b/zop-system/gcp/kubernetes.tf @@ -3,32 +3,34 @@ data "google_container_cluster" "gke" { location = var.app_region } +data "google_project" "this" {} + data "google_client_config" "default" {} provider "kubernetes" { host = "https://${data.google_container_cluster.gke.endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key - cluster_ca_certificate = base64decode(data.google_container_cluster.gke.master_auth[0].cluster_ca_certificate) + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key + cluster_ca_certificate = base64decode(data.google_container_cluster.gke.master_auth.0.cluster_ca_certificate ) } provider "kubectl" { load_config_file = false host = "https://${data.google_container_cluster.gke.endpoint}" token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key - cluster_ca_certificate = base64decode(data.google_container_cluster.gke.master_auth[0].cluster_ca_certificate) + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key + cluster_ca_certificate = base64decode(data.google_container_cluster.gke.master_auth.0.cluster_ca_certificate ) } provider "helm" { kubernetes { - host = "https://${data.google_container_cluster.gke.endpoint}" - token = data.google_client_config.default.access_token - client_certificate = data.google_container_cluster.gke.master_auth[0].client_certificate - client_key = data.google_container_cluster.gke.master_auth[0].client_key - cluster_ca_certificate = base64decode(data.google_container_cluster.gke.master_auth[0].cluster_ca_certificate) + host = 
"https://${data.google_container_cluster.gke.endpoint}" + token = data.google_client_config.default.access_token + client_certificate = data.google_container_cluster.gke.master_auth.0.client_certificate + client_key = data.google_container_cluster.gke.master_auth.0.client_key + cluster_ca_certificate = base64decode(data.google_container_cluster.gke.master_auth.0.cluster_ca_certificate ) } }