locals {
  # Unique, lowercase cluster name, prefixed to every resource this module creates
  uname = var.unique_suffix ? lower("${var.cluster_name}-${random_string.uid.result}") : lower(var.cluster_name)

  default_tags = {
    "ClusterType" = "rke2",
  }

  ccm_tags = {
    "kubernetes.io/cluster/${local.uname}" = "owned"
  }

  cluster_data = {
    name       = local.uname
    server_url = module.cp_lb.dns
    cluster_sg = aws_security_group.cluster.id
    token      = module.statestore.token
  }

  lb_subnets        = var.lb_subnets == null ? var.subnets : var.lb_subnets
  target_group_arns = module.cp_lb.target_group_arns
}
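# For example, with cluster_name = "dev-cluster" and unique_suffix = true, local.uname
# resolves to something like "dev-cluster-x4f" (three lowercase alphanumeric characters
# appended by random_string.uid below; the suffix value here is illustrative only).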
resource "random_string" "uid" {
# NOTE: Don't get too crazy here, several aws resources have tight limits on lengths (such as load balancers), in practice we are also relying on users to uniquely identify their cluster names
length = 3
special = false
lower = true
upper = false
numeric = true
}
#
# Cluster join token
#
resource "random_password" "token" {
  length  = 40
  special = false
}
module "statestore" {
source = "./modules/statestore"
name = local.uname
create_acl = var.create_acl
token = random_password.token.result
tags = merge(local.default_tags, var.tags)
attach_deny_insecure_transport_policy = var.statestore_attach_deny_insecure_transport_policy
}
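# The statestore persists the join token and receives the generated kubeconfig;
# server nodes read and write it via the get-token and put-kubeconfig IAM policies
# attached further down in this file.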
#
# Controlplane Load Balancer
#
module "cp_lb" {
  source  = "./modules/nlb"
  name    = local.uname
  vpc_id  = var.vpc_id
  subnets = local.lb_subnets

  enable_cross_zone_load_balancing = var.controlplane_enable_cross_zone_load_balancing
  internal                         = var.controlplane_internal
  access_logs_bucket               = var.controlplane_access_logs_bucket

  cp_ingress_cidr_blocks            = var.controlplane_allowed_cidrs
  cp_supervisor_ingress_cidr_blocks = var.controlplane_allowed_cidrs

  tags = merge(local.default_tags, var.tags)
}
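# The NLB fronts both controlplane listeners: 6443 (Kubernetes API) and 9345 (RKE2
# supervisor). Both share var.controlplane_allowed_cidrs as their ingress allowlist.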
#
# Security Groups
#

# Shared Cluster Security Group
resource "aws_security_group" "cluster" {
  name        = "${local.uname}-rke2-cluster"
  description = "Shared ${local.uname} cluster security group"
  vpc_id      = var.vpc_id

  tags = merge({
    "shared" = "true",
  }, local.default_tags, var.tags)
}

resource "aws_security_group_rule" "cluster_shared" {
  description       = "Allow all inbound traffic between ${local.uname} cluster nodes"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  security_group_id = aws_security_group.cluster.id
  type              = "ingress"
  self              = true
}

resource "aws_security_group_rule" "cluster_egress" {
  description       = "Allow all outbound traffic"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  security_group_id = aws_security_group.cluster.id
  type              = "egress"
  cidr_blocks       = ["0.0.0.0/0"]
}
# Server Security Group
resource "aws_security_group" "server" {
  name        = "${local.uname}-rke2-server"
  vpc_id      = var.vpc_id
  description = "${local.uname} rke2 server node pool"
  tags        = merge(local.default_tags, var.tags)
}
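# Only the load balancer's security group may reach the servers on the controlplane
# ports: 6443 (Kubernetes API) and 9345 (RKE2 supervisor, used for node registration).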
resource "aws_security_group_rule" "server_cp" {
from_port = 6443
to_port = 6443
protocol = "tcp"
security_group_id = aws_security_group.server.id
type = "ingress"
source_security_group_id = module.cp_lb.security_group
}
resource "aws_security_group_rule" "server_cp_supervisor" {
from_port = 9345
to_port = 9345
protocol = "tcp"
security_group_id = aws_security_group.server.id
type = "ingress"
source_security_group_id = module.cp_lb.security_group
}
#
# IAM Role
#
module "iam" {
  count = var.iam_instance_profile == "" ? 1 : 0

  source = "./modules/policies"
  name   = "${local.uname}-rke2-server"

  permissions_boundary = var.iam_permissions_boundary

  tags = merge(local.default_tags, var.tags)
}
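# When var.iam_instance_profile is empty, this module creates the server role and
# instance profile; the AWS introspection, CCM, and autoscaler policies below attach
# only in that case. A caller-supplied profile is used as-is otherwise.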
#
# Policies
#
resource "aws_iam_role_policy" "aws_required" {
  count = var.iam_instance_profile == "" ? 1 : 0

  name   = "${local.uname}-rke2-server-aws-introspect"
  role   = module.iam[count.index].role
  policy = data.aws_iam_policy_document.aws_required[count.index].json
}

resource "aws_iam_role_policy" "aws_ccm" {
  count = var.iam_instance_profile == "" && var.enable_ccm ? 1 : 0

  name   = "${local.uname}-rke2-server-aws-ccm"
  role   = module.iam[count.index].role
  policy = data.aws_iam_policy_document.aws_ccm[count.index].json
}

resource "aws_iam_role_policy" "aws_autoscaler" {
  count = var.iam_instance_profile == "" && var.enable_autoscaler ? 1 : 0

  name   = "${local.uname}-rke2-server-aws-autoscaler"
  role   = module.iam[count.index].role
  policy = data.aws_iam_policy_document.aws_autoscaler[count.index].json
}
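# The two policies below are deliberately not guarded by a count: every server needs
# to fetch the join token and upload the kubeconfig, so they attach either to the
# module-created role or to the role behind the caller-supplied instance profile.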
resource "aws_iam_role_policy" "get_token" {
#count = var.iam_instance_profile == "" ? 1 : 0
name = "${local.uname}-rke2-server-get-token"
role = var.iam_instance_profile == "" ? module.iam[0].role : data.aws_iam_role.provided[0].name
policy = module.statestore.token.policy_document
}
resource "aws_iam_role_policy" "put_kubeconfig" {
#count = var.iam_instance_profile == "" ? 1 : 0
name = "${local.uname}-rke2-server-put-kubeconfig"
role = var.iam_instance_profile == "" ? module.iam[0].role : data.aws_iam_role.provided[0].name
policy = module.statestore.kubeconfig_put_policy
}
#
# Server Nodepool
#
module "servers" {
  source = "./modules/nodepool"
  name   = "${local.uname}-server"

  vpc_id                      = var.vpc_id
  subnets                     = var.subnets
  ami                         = var.ami
  instance_type               = var.instance_type
  block_device_mappings       = var.block_device_mappings
  extra_block_device_mappings = var.extra_block_device_mappings
  vpc_security_group_ids = concat(
    [aws_security_group.cluster.id, aws_security_group.server.id],
    var.extra_security_group_ids
  )
  spot                        = var.spot
  target_group_arns           = local.target_group_arns
  wait_for_capacity_timeout   = var.wait_for_capacity_timeout
  metadata_options            = var.metadata_options
  associate_public_ip_address = var.associate_public_ip_address

  # Overridable variables
  userdata             = data.cloudinit_config.this.rendered
  iam_instance_profile = var.iam_instance_profile == "" ? module.iam[0].iam_instance_profile : var.iam_instance_profile

  # Guard against etcd topologies that aren't recommended: min and max are pinned
  # deliberately, and only the desired count is user-controlled.
  asg = {
    min                  = 1
    max                  = 7
    desired              = var.servers
    suspended_processes  = var.suspended_processes
    termination_policies = var.termination_policies
  }

  # TODO: Ideally set this to `length(var.servers)`, but currently blocked by: https://github.com/rancher/rke2/issues/349
  min_elb_capacity = 1

  tags = merge({
    "Role" = "server",
  }, local.ccm_tags, var.tags)
}
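# A minimal invocation of this module might look like the sketch below. The source
# path and all values are placeholders; the input names come from the var.* references
# in this file.
#
#   module "rke2" {
#     source       = "<path or git URL of this module>"
#     cluster_name = "example"
#     vpc_id       = "vpc-0123456789abcdef0"
#     subnets      = ["subnet-aaaa", "subnet-bbbb", "subnet-cccc"]
#     ami          = "ami-0123456789abcdef0"
#     servers      = 3
#   }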