# eks-worker-nodes.tf (forked from mylandmarktechs/eks-terraform-setup)

#
# EKS Worker Nodes Resources
# * IAM role allowing Kubernetes actions to access other AWS services
# * EKS Node Group to launch worker nodes
#
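# This file assumes the EKS control plane (aws_eks_cluster.demo) and the worker
# subnets (aws_subnet.demo) are defined elsewhere in this configuration, for
# example in eks-cluster.tf and vpc.tf (file names assumed, not confirmed here).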
resource "aws_iam_role" "demo-node" {
name = "terraform-eks-demo-node"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
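
# Permissions used by the Kubernetes Cluster Autoscaler running on the worker
# nodes: read-only access to Auto Scaling groups, plus scaling actions that are
# restricted (via the tag conditions below) to the ASGs owned by this cluster.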
data "aws_iam_policy_document" "worker_autoscaling" {
statement {
sid = "eksWorkerAutoscalingAll"
effect = "Allow"
actions = [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:DescribeLaunchTemplateVersions",
]
resources = ["*"]
}
statement {
sid = "eksWorkerAutoscalingOwn"
effect = "Allow"
actions = [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
]
resources = ["*"]
condition {
test = "StringEquals"
variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.demo.id}"
values = ["owned"]
}
condition {
test = "StringEquals"
variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
values = ["true"]
}
}
}
resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
policy_arn = aws_iam_policy.worker_autoscaling.arn
role = aws_iam_role.demo-node.name
}
resource "aws_iam_policy" "worker_autoscaling" {
name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.demo.id}"
description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.demo.id}"
policy = data.aws_iam_policy_document.worker_autoscaling.json
}
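
# AWS managed policies that EKS worker nodes require: node registration with
# the cluster, the VPC CNI plugin, and read-only image pulls from Amazon ECR.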
resource "aws_iam_role_policy_attachment" "demo-node-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.demo-node.name
}
resource "aws_iam_role_policy_attachment" "demo-node-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.demo-node.name
}
resource "aws_iam_role_policy_attachment" "demo-node-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.demo-node.name
}
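
# Managed node group for the cluster. The depends_on below ensures the IAM
# policies are attached before the nodes are created and are not detached
# while the node group still exists. Note that remote_access with ec2_ssh_key
# but no source_security_group_ids allows SSH (port 22) to the nodes from
# 0.0.0.0/0, per AWS defaults.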
resource "aws_eks_node_group" "demo" {
cluster_name = aws_eks_cluster.demo.name
node_group_name = "demo"
node_role_arn = aws_iam_role.demo-node.arn
subnet_ids = aws_subnet.demo[*].id
instance_types = [var.eks_node_instance_type]
remote_access{
ec2_ssh_key = var.key_pair_name
}
scaling_config {
desired_size = 2
max_size = 200
min_size = 2
}
depends_on = [
aws_iam_role_policy_attachment.workers_autoscaling,
aws_iam_role_policy_attachment.demo-node-AmazonEKSWorkerNodePolicy,
aws_iam_role_policy_attachment.demo-node-AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.demo-node-AmazonEC2ContainerRegistryReadOnly,
]
}
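
# The input variables referenced above (var.eks_node_instance_type and
# var.key_pair_name) are expected to be declared elsewhere, typically in
# variables.tf. A minimal sketch, assuming a t3.medium default instance type
# (not confirmed by this file), would be:
#
#   variable "eks_node_instance_type" {
#     description = "EC2 instance type for the EKS worker nodes"
#     type        = string
#     default     = "t3.medium"
#   }
#
#   variable "key_pair_name" {
#     description = "Name of an existing EC2 key pair for SSH access to the nodes"
#     type        = string
#   }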