// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT

module "common" {
  source = "../../../../common"
}

module "basic_components" {
  source = "../../../../basic_components"

  region = var.region
}

locals {
  aws_eks = "aws eks --region ${var.region}"
}

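# Auth token for the cluster API; typically consumed by the Kubernetes and
# Helm providers, which are configured outside this file.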
data "aws_eks_cluster_auth" "this" {
  name = aws_eks_cluster.this.name
}

resource "aws_eks_cluster" "this" {
  name     = "cwagent-eks-integ-${module.common.testing_id}"
  role_arn = module.basic_components.role_arn
  version  = var.k8s_version
  vpc_config {
    subnet_ids         = module.basic_components.public_subnet_ids
    security_group_ids = [module.basic_components.security_group]
  }
}

# EKS Node Group
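# A single on-demand node is enough for this test; scaling_config pins the
# group to exactly one instance.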
resource "aws_eks_node_group" "this" {
  cluster_name    = aws_eks_cluster.this.name
  node_group_name = "cwagent-eks-integ-node-${module.common.testing_id}"
  node_role_arn   = aws_iam_role.node_role.arn
  subnet_ids      = module.basic_components.public_subnet_ids

  scaling_config {
    desired_size = 1
    max_size     = 1
    min_size     = 1
  }

  ami_type       = var.ami_type
  capacity_type  = "ON_DEMAND"
  disk_size      = 20
  instance_types = [var.instance_type]

  depends_on = [
    aws_iam_role_policy_attachment.node_AmazonEC2ContainerRegistryReadOnly,
    aws_iam_role_policy_attachment.node_AmazonEKS_CNI_Policy,
    aws_iam_role_policy_attachment.node_AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.pod_CloudWatchAgentServerPolicy
  ]
}

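# The EKS Pod Identity Agent add-on must be running on the nodes before the
# pod identity association below can deliver credentials to the agent pods.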
resource "aws_eks_addon" "pod_identity_addon" {
  cluster_name = aws_eks_cluster.this.name
  addon_name   = "eks-pod-identity-agent"
  depends_on   = [aws_eks_node_group.this]
}

# EKS Node IAM Role
resource "aws_iam_role" "node_role" {
  name = "cwagent-eks-Worker-Role-${module.common.testing_id}"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}

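# Trust policy for EKS Pod Identity: the pods.eks.amazonaws.com service
# principal needs both sts:AssumeRole and sts:TagSession to assume the role.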
data "aws_iam_policy_document" "pod_identity_policy" {
  statement {
    effect = "Allow"

    principals {
      type        = "Service"
      identifiers = ["pods.eks.amazonaws.com"]
    }

    actions = [
      "sts:AssumeRole",
      "sts:TagSession"
    ]
  }
}

resource "aws_iam_role" "pod_identity_role" {
  name               = "cwagent-eks-pod-identity-role-${module.common.testing_id}"
  assume_role_policy = data.aws_iam_policy_document.pod_identity_policy.json
}

resource "aws_iam_role_policy_attachment" "node_AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.node_role.name
}

resource "aws_iam_role_policy_attachment" "node_AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.node_role.name
}

resource "aws_iam_role_policy_attachment" "node_AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.node_role.name
}

resource "aws_iam_role_policy_attachment" "pod_CloudWatchAgentServerPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
  role       = aws_iam_role.pod_identity_role.name
}

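# Maps the cloudwatch-agent service account in the amazon-cloudwatch
# namespace to the pod identity role defined above.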
resource "aws_eks_pod_identity_association" "association" {
  cluster_name    = aws_eks_cluster.this.name
  namespace       = "amazon-cloudwatch"
  service_account = "cloudwatch-agent"
  role_arn        = aws_iam_role.pod_identity_role.arn
  depends_on      = [aws_eks_cluster.this]
}

# TODO: these security groups should be created once and then reused
# EKS Cluster Security Group
resource "aws_security_group" "eks_cluster_sg" {
  name        = "cwagent-eks-cluster-sg-${module.common.testing_id}"
  description = "Cluster communication with worker nodes"
  vpc_id      = module.basic_components.vpc_id
}

resource "aws_security_group_rule" "cluster_inbound" {
  description              = "Allow worker nodes to communicate with the cluster API Server"
  from_port                = 443
  protocol                 = "tcp"
  security_group_id        = aws_security_group.eks_cluster_sg.id
  source_security_group_id = aws_security_group.eks_nodes_sg.id
  to_port                  = 443
  type                     = "ingress"
}

resource "aws_security_group_rule" "cluster_outbound" {
  description              = "Allow cluster API Server to communicate with the worker nodes"
  from_port                = 1024
  protocol                 = "tcp"
  security_group_id        = aws_security_group.eks_cluster_sg.id
  source_security_group_id = aws_security_group.eks_nodes_sg.id
  to_port                  = 65535
  type                     = "egress"
}

# EKS Node Security Group
resource "aws_security_group" "eks_nodes_sg" {
  name        = "cwagent-eks-node-sg-${module.common.testing_id}"
  description = "Security group for all nodes in the cluster"
  vpc_id      = module.basic_components.vpc_id

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_security_group_rule" "nodes_internal" {
  description              = "Allow nodes to communicate with each other"
  from_port                = 0
  protocol                 = "-1"
  security_group_id        = aws_security_group.eks_nodes_sg.id
  source_security_group_id = aws_security_group.eks_nodes_sg.id
  to_port                  = 65535
  type                     = "ingress"
}

resource "aws_security_group_rule" "nodes_cluster_inbound" {
  description              = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
  from_port                = 1025
  protocol                 = "tcp"
  security_group_id        = aws_security_group.eks_nodes_sg.id
  source_security_group_id = aws_security_group.eks_cluster_sg.id
  to_port                  = 65535
  type                     = "ingress"
}

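# Clone the aws-observability helm-charts repo at the requested branch;
# skipped when a clone from a previous run is already present.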
resource "null_resource" "clone_helm_chart" {
  triggers = {
    timestamp = timestamp() # Forces re-run on every apply
  }
  provisioner "local-exec" {
    command = <<-EOT
      if [ ! -d "./helm-charts" ]; then
        git clone -b ${var.helm_chart_branch} https://github.com/aws-observability/helm-charts.git ./helm-charts
      fi
    EOT
  }
}

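# Install the Amazon CloudWatch Observability chart from the local clone.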
resource "helm_release" "aws_observability" {
  name             = "amazon-cloudwatch-observability"
  chart            = "./helm-charts/charts/amazon-cloudwatch-observability"
  namespace        = "amazon-cloudwatch"
  create_namespace = true

  set {
    name  = "clusterName"
    value = aws_eks_cluster.this.name
  }

  set {
    name  = "region"
    value = var.region
  }

  depends_on = [
    aws_eks_cluster.this,
    aws_eks_node_group.this,
    null_resource.clone_helm_chart,
    aws_eks_addon.pod_identity_addon,
    aws_eks_pod_identity_association.association,
  ]
}

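# Point the local kubeconfig at the new cluster and run two read-only
# commands as a smoke check.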
resource "null_resource" "kubectl" {
  depends_on = [
    aws_eks_cluster.this,
    aws_eks_node_group.this,
  ]
  provisioner "local-exec" {
    command = <<-EOT
      ${local.aws_eks} update-kubeconfig --name ${aws_eks_cluster.this.name}
      ${local.aws_eks} list-clusters --output text
      ${local.aws_eks} describe-cluster --name ${aws_eks_cluster.this.name} --output text
    EOT
  }
}

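# Patch the AmazonCloudWatchAgent custom resource to the agent image under
# test, and pin the operator deployment to the latest published image.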
resource "null_resource" "update_image" {
  depends_on = [helm_release.aws_observability, null_resource.kubectl]
  triggers = {
    timestamp = timestamp() # Forces re-run on every apply
  }
  provisioner "local-exec" {
    command = <<-EOT
      kubectl -n amazon-cloudwatch patch AmazonCloudWatchAgent cloudwatch-agent --type='json' -p='[{"op": "replace", "path": "/spec/image", "value": "${var.cwagent_image_repo}:${var.cwagent_image_tag}"}]'
      kubectl set image deployment/amazon-cloudwatch-observability-controller-manager -n amazon-cloudwatch manager=public.ecr.aws/cloudwatch-agent/cloudwatch-agent-operator:latest
      sleep 10
    EOT
  }
}

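# Busybox pod that writes one log line per second; presumably consumed by
# the fluent log validation below.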
resource "kubernetes_pod" "log_generator" {
  depends_on = [aws_eks_node_group.this]
  metadata {
    name      = "log-generator"
    namespace = "default"
  }

  spec {
    container {
      name  = "log-generator"
      image = "busybox"

      # Run a shell loop that generates one log line every second
      command = ["/bin/sh", "-c"]
      args    = ["while true; do echo \"Log entry at $(date)\"; sleep 1; done"]
    }
    restart_policy = "Always"
  }
}

# Get the single instance ID of the node in the node group
data "aws_instances" "eks_node" {
  depends_on = [
    aws_eks_node_group.this
  ]
  filter {
    name   = "tag:eks:nodegroup-name"
    values = [aws_eks_node_group.this.node_group_name]
  }
}

# Retrieve details of the single instance to get private DNS
data "aws_instance" "eks_node_detail" {
  depends_on = [
    data.aws_instances.eks_node
  ]
  instance_id = data.aws_instances.eks_node.ids[0]
}

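# Run the Go validation suites from the repository root against the cluster.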
resource "null_resource" "validator" {
  depends_on = [
    aws_eks_node_group.this,
    aws_eks_addon.pod_identity_addon,
    helm_release.aws_observability,
    null_resource.update_image,
    kubernetes_pod.log_generator,
  ]

  triggers = {
    always_run = timestamp()
  }

  provisioner "local-exec" {
    command = <<-EOT
      echo "Validating CloudWatch Agent with pod identity credential"
      cd ../../../../..
      go test ./test/metric_value_benchmark -timeout 1h -eksClusterName=${aws_eks_cluster.this.name} -computeType=EKS -v -eksDeploymentStrategy=PODIDENTITY -instanceId=${data.aws_instance.eks_node_detail.instance_id} &&
      go test ./test/fluent -eksClusterName=${aws_eks_cluster.this.name} -computeType=EKS -v -eksDeploymentStrategy=DAEMON
    EOT
  }
}