version: '3'

# NOTE: Task doesn't allow overriding environment variables. Thus, when an
# environment variable is referenced in a taskfile, it is because we expect it
# to be defined by the environment where Task is being invoked or in a task's
# `env:` attribute.
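#
# A minimal sketch of the `env:` case (the task name and variable here are
# illustrative, not defined in this file):
#
#   print-greeting:
#     env:
#       GREETING: hello
#     cmds:
#       - echo "$GREETING"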
dotenv: []

vars:
  BINDIR: .local/bin
  # Configuration Defaults
  NAMESPACE: rn
  RELEASE: rp
  KIND_CLUSTERNAME: rp-helm
  CLUSTER_NAME: '{{.CLUSTER_NAME | default "capi"}}'
  CLUSTER_NAMESPACE: '{{.CLUSTER_NAMESPACE | default "default"}}'
  # Overridable task vars
  HELM_OPTIONS: ""
  KIND_FLAGS: "--config .github/kind.yaml"
  SRC_DIR:
    sh: realpath {{default "." .SRC_DIR}}
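
# HELM_OPTIONS and KIND_FLAGS above are intended to be overridden per
# invocation via Task's VAR=value syntax. An illustrative call (the --set
# value is only an example, not a recommended configuration):
#
#   task install-redpanda-chart HELM_OPTIONS='--set console.enabled=false'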

includes:
  tool:
    taskfile: tasks/ToolTasks.yaml
    vars:
      BINDIR: "{{.BINDIR}}"
    # if a task is referenced multiple times, only run it once
    run: once

# Try to follow "THING:ACTION:SPECIFIER" for specific actions.
# Examples:
# - chart:lint:redpanda       # Lint the redpanda chart
# - helm:add-repo:jetstack    # Add the jetstack repo to helm
# - helm:install:cert-manager # Install cert-manager using helm
# When this file becomes too long, each THING can be extracted into its own
# file.
# For CI or running many types of similar ACTIONs, have a top-level task named
# just ACTION. For example, "lint" would run tasks that match *:lint:*. (Though
# there's no matching in taskfile AFAIK, so this is done by hand.)
#
# Feel free to change this format provided there's a general flow to the new
# format, all existing tasks are changed, and backwards compatibility is
# maintained via aliases.
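#
# For example, a renamed task keeps its old name as an alias, as several
# tasks below do:
#
#   helm:add-repo:jetstack:
#     aliases:
#       - add-jetstack-repo # old name, kept for backwards compatibility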
tasks:
  shell:
    desc: "Launch a development shell"
    cmds:
      - nix develop --impure

  ci:lint:
    cmds:
      - task: generate
      - gofumpt -w .
      # Fail on any generated diffs.
      - git diff --exit-code
      # Actually run linters.
      - actionlint
      - ct lint --chart-dirs ./charts --check-version-increment=false --github-groups --all
      - .github/check-ci-files.sh charts/connectors/ci
      - .github/check-ci-files.sh charts/kminion/ci
      - .github/check-ci-files.sh charts/operator/ci
      - .github/check-ci-files.sh charts/redpanda/ci
      - staticcheck ./...

  generate:
    desc: "Run all file generation tasks"
    cmds:
      # Generate chart README.md's
      - nix develop -c helm-docs -c ./charts/
      - task: chart:generate:console
      - task: chart:generate:operator
      - task: chart:generate:redpanda
      - task: chart:generate:connectors
      # Ensure go deps are up to date
      - go mod tidy
      # Ensure go deps in the gotohelm test package are up to date
      - cd ./pkg/gotohelm/testdata/src/example && go mod tidy
      - go work sync
      # Ensure flake.nix has been formatted.
      - nix fmt
  chart:generate:console:
    desc: "Generate files for the console Helm chart"
    cmds:
      # Generate a "partial" version of the Values struct.
      - go run ./cmd/genpartial -out charts/console/values_partial.gen.go -struct Values ./charts/console
      # Generate helm templates from Go definitions
      - go run ./cmd/gotohelm -write ./charts/console/templates ./charts/console

  chart:generate:operator:
    desc: "Generate files for the operator Helm chart"
    cmds:
      # Generate a "partial" version of the Values struct.
      - go run ./cmd/genpartial -out charts/operator/values_partial.gen.go -struct Values ./charts/operator
      # Generate the values JSON schema from the Values struct
      - go run ./cmd/genschema operator > charts/operator/values.schema.json
      # Generate helm templates from Go definitions
      - go run ./cmd/gotohelm -write ./charts/operator/templates ./charts/operator

  chart:generate:redpanda:
    desc: "Generate files for the redpanda Helm chart"
    cmds:
      # Generate a "partial" version of the Values struct.
      - go run ./cmd/genpartial -out charts/redpanda/values_partial.gen.go -struct Values ./charts/redpanda
      # Generate the values JSON schema from the Values struct
      - go run ./cmd/genschema redpanda > charts/redpanda/values.schema.json
      # Generate helm templates from Go definitions
      - go run ./cmd/gotohelm -write ./charts/redpanda/templates ./charts/redpanda ./charts/...

  chart:generate:connectors:
    desc: "Generate files for the connectors Helm chart"
    cmds:
      # Generate a "partial" version of the Values struct.
      - go run ./cmd/genpartial -out charts/connectors/values_partial.gen.go -struct Values ./charts/connectors
      # Generate helm templates from Go definitions
      - go run ./cmd/gotohelm -write ./charts/connectors/templates ./charts/connectors
  create-test-rack-awareness:
    cmds:
      - .github/annotate_kind_nodes.sh {{.KIND_CLUSTERNAME}}

  create-test-metallb-resources:
    cmds:
      - kubectl -n metallb-system apply -f .github/metallb-config.yaml

  create-test-tls-template:
    cmds:
      - .github/create_tls.sh "random-domain"

  create-test-sasl-secret-template:
    cmds:
      - .github/create-sasl-secret.sh "some-users"

  create-test-mv-files:
    cmds:
      - mv external-tls-secret.yaml charts/redpanda/templates/
      - cp .github/external-service.yaml charts/redpanda/templates/
      - mv some-users-updated.yaml charts/redpanda/templates/

  setup-test-files:
    cmds:
      - task: create-test-rack-awareness
      - task: create-test-tls-template
      - task: create-test-sasl-secret-template
      - task: create-test-mv-files
      - task: create-test-metallb-resources

  up:
    cmds:
      - task: kind-create
      - task: setup-test-files
      - task: install-redpanda-chart
  helm:add-repo:jetstack:
    aliases:
      - add-jetstack-repo
    cmds:
      - helm repo add jetstack https://charts.jetstack.io
      - helm repo update
    status:
      - helm search repo -r '\vjetstack/cert-manager\v' | grep cert-manager

  helm:add-repo:prometheus-community:
    aliases:
      - add-prometheus-community-repo
    cmds:
      - helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
      - helm repo update
    status:
      - helm search repo -r '\vprometheus-community/kube-prometheus-stack\v' | grep kube-prometheus-stack

  helm:add-repo:metallb:
    aliases:
      - add-metallb-repo
    cmds:
      - helm repo add metallb https://metallb.github.io/metallb
      - helm repo update
    status:
      - helm search repo -r '\vmetallb/metallb\v' | grep metallb
  helm:install:metallb:
    aliases:
      - install-metallb
    deps:
      - helm:add-repo:metallb
    cmds:
      - |
        helm install metallb metallb/metallb \
          --create-namespace \
          --namespace metallb-system \
          --version 0.13.10 \
          --wait \
          --wait-for-jobs
      - kubectl --namespace metallb-system apply -f .github/metallb-config.yaml

  helm:install:cert-manager:
    aliases:
      - install-cert-manager
    deps:
      - helm:add-repo:jetstack
    cmds:
      - |
        helm install cert-manager jetstack/cert-manager \
          --create-namespace \
          --namespace cert-manager \
          --set installCRDs=true \
          --version v1.11.0 \
          --wait \
          --wait-for-jobs

  helm:install:kube-prometheus-stack:
    aliases:
      - install-kube-prometheus-stack
    deps:
      - helm:add-repo:prometheus-community
    cmds:
      - |
        helm install prometheus prometheus-community/kube-prometheus-stack \
          --namespace prometheus \
          --create-namespace \
          --set alertmanager.enabled=false \
          --set grafana.enabled=false \
          --set kubeStateMetrics.enabled=false \
          --set nodeExporter.enabled=false \
          --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false \
          --wait \
          --wait-for-jobs
  aws-check-login:
    internal: true
    cmds:
      - aws sts get-caller-identity > /dev/null

  kind-create:
    cmds:
      - kind create cluster --name {{.KIND_CLUSTERNAME}} {{.KIND_FLAGS}}
      - task: install-cert-manager
      - task: install-metallb
      - task: install-kube-prometheus-stack
    status:
      - "kind get clusters | grep {{.KIND_CLUSTERNAME}}"

  kind-delete:
    cmds:
      - kind delete cluster --name {{.KIND_CLUSTERNAME}}
  install-redpanda-chart:
    aliases:
      - upgrade-redpanda-chart
    cmds:
      - helm upgrade --install {{ .RELEASE }} ./charts/redpanda --namespace {{ .NAMESPACE }} --create-namespace --wait --debug {{ .HELM_OPTIONS }}

  uninstall-redpanda-chart:
    cmds:
      - helm uninstall {{ .RELEASE }} -n {{ .NAMESPACE }} --wait || true
      - kubectl -n {{ .NAMESPACE }} delete pods --all --grace-period=0 --force --wait=false || true
      - kubectl -n {{ .NAMESPACE }} delete pvc --all --force --grace-period=0 --wait=false || true
      - kubectl delete ns {{ .NAMESPACE }}

  minikube-start:
    cmds:
      - minikube start --nodes=4
      - kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.23/deploy/local-path-storage.yaml
      - ./scripts/change-default-sc.sh
      - task: install-cert-manager

  minikube-delete:
    cmds:
      - minikube delete
  capi-bootstrap-aws:
    deps:
      - tool:clusterctl
    env:
      EXP_MACHINE_POOL: true
      CAPA_EKS_ADD_ROLES: "{{.CAPA_EKS_ADD_ROLES}}"
      CAPA_EKS_IAM: "{{.CAPA_EKS_IAM}}"
      AWS_B64ENCODED_CREDENTIALS: "{{.AWS_B64ENCODED_CREDENTIALS}}"
    vars:
      CAPI_INFRASTRUCTURE: '{{ default "unknown" .CAPI_INFRASTRUCTURE }}'
    cmds:
      - task: kind-create
        vars:
          KIND_CLUSTERNAME: bootstrap
          KIND_FLAGS: ""
      - "{{.BINDIR}}/clusterctl init -i {{ .CAPI_INFRASTRUCTURE }} -v7 --wait-providers"

  capi-bootstrap-gke:
    deps:
      - tool:clusterctl
      - tool:auth-gcp
    env:
      GCP_B64ENCODED_CREDENTIALS: "{{.GCP_B64ENCODED_CREDENTIALS}}"
      EXP_CAPG_GKE: true
      EXP_MACHINE_POOL: true
    vars:
      CAPI_INFRASTRUCTURE: gcp
      GCP_PROVIDER_VERSION: v1.3.0
    cmds:
      - task: kind-create
        vars:
          KIND_CLUSTERNAME: bootstrap
          KIND_FLAGS: ""
      - "{{.BINDIR}}/clusterctl init -i {{ .CAPI_INFRASTRUCTURE }}:{{ .GCP_PROVIDER_VERSION }} -v7 --wait-providers"

  capi-bootstrap-aks:
    deps:
      - tool:clusterctl
    env:
      EXP_MACHINE_POOL: true
    vars:
      CAPI_INFRASTRUCTURE: '{{ default "unknown" .CAPI_INFRASTRUCTURE }}'
      AKS_PROVIDER_VERSION: v1.10.0
    cmds:
      - echo ~~~ Create kind cluster
      - task: kind-create
        vars:
          KIND_CLUSTERNAME: bootstrap
          KIND_FLAGS: ""
      - echo ~~~ Initialize cluster API core controllers
      - "{{.BINDIR}}/clusterctl init -i {{ .CAPI_INFRASTRUCTURE }}:{{ .AKS_PROVIDER_VERSION }} -v7 --wait-providers"
  capi-create-eks:
    deps:
      - aws-check-login
      - tool:clusterawsadm
    env:
      CAPA_EKS_IAM: true
      CAPA_EKS_ADD_ROLES: true
    cmds:
      - task: capi-bootstrap-aws
        vars:
          CAPI_INFRASTRUCTURE: aws
          CAPA_EKS_IAM: true
          CAPA_EKS_ADD_ROLES: true
          AWS_B64ENCODED_CREDENTIALS:
            sh: "{{.BINDIR}}/clusterawsadm bootstrap credentials encode-as-profile 2>/dev/null"
      - "{{.BINDIR}}/clusterawsadm bootstrap iam create-cloudformation-stack --config=.buildkite/capi/eks-bootstrap.yaml"
      - helm install -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} .buildkite/capi/eks-cluster --create-namespace --debug
      - kubectl wait --for=condition=ready cluster {{.CLUSTER_NAME}}-eks-cluster --timeout=40m
      - kubectl wait --for=condition=Ready machinepool {{.CLUSTER_NAME}}-eks-cluster-pool-0 --timeout=20m

  capi-create-gke:
    env:
      # this is done by running the following:
      # export GCP_B64ENCODED_CREDENTIALS=$(base64 < "${GOOGLE_APPLICATION_CREDENTIALS}" | tr -d '\n')
      GCP_B64ENCODED_CREDENTIALS: "{{.GCP_B64ENCODED_CREDENTIALS}}"
      PROJECT_ID: "{{.PROJECT_ID}}"
    cmds:
      - task: capi-bootstrap-gke
        vars:
          CAPI_INFRASTRUCTURE: gcp
      - helm install -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} .buildkite/capi/gke-cluster --create-namespace --set projectID={{.PROJECT_ID}} --debug
      - kubectl wait --for=condition=ready cluster {{.CLUSTER_NAME}}-gke-cluster --timeout=40m
      - kubectl wait --for=condition=Ready machinepool {{.CLUSTER_NAME}}-gke-cluster-mp-0 --timeout=20m
  capi-create-aks:
    cmds:
      - task: capi-bootstrap-aks
        vars:
          CAPI_INFRASTRUCTURE: azure
      # Create a secret to include the password of the Service Principal identity created in Azure.
      # This secret will be referenced by the AzureClusterIdentity used by the AzureCluster.
      - echo ~~~ Install Azure cluster API controllers
      - kubectl create ns "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}}" || true
      - kubectl delete secret "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAME}}" --namespace "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}}" || true
      - kubectl create secret generic "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAME}}" --from-literal=clientSecret="{{.AZURE_CLIENT_SECRET}}" --namespace "{{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}}"
      - |
        helm install -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} .buildkite/capi/aks-cluster \
          --create-namespace \
          --set clientID={{.AZURE_CLIENT_ID}} \
          --set tenantID={{.AZURE_TENANT_ID}} \
          --set resourceGroup={{.TEST_RESOURCE_GROUP}} \
          --set subscriptionID={{.AZURE_SUBSCRIPTION_ID}} \
          --set workerReplicas=3 \
          --set clientSecret.name={{.AZURE_CLUSTER_IDENTITY_SECRET_NAME}} \
          --set clientSecret.namespace={{.AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE}} \
          --debug
      - echo ~~~ Wait for Azure infra to be provisioned
      - kubectl wait --for=condition=ready cluster {{.CLUSTER_NAME}}-aks-cluster --timeout=40m -n {{.CLUSTER_NAMESPACE}}
      - kubectl wait --for=condition=Ready azuremachines --all --timeout=40m -n {{.CLUSTER_NAMESPACE}}
      - "{{.BINDIR}}/clusterctl get kubeconfig --namespace {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}}-aks-cluster > {{.CLUSTER_KUBECONFIG_PATH}}"
      - echo ~~~ Azure K8S cluster
      - echo ~~~ Install Azure Cloud Controller Manager
      - |
        helm install \
          --kubeconfig={{.CLUSTER_KUBECONFIG_PATH}} \
          --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure \
          --generate-name \
          --set infra.clusterName={{.CLUSTER_NAME}} \
          --set cloudControllerManager.clusterCIDR="192.168.0.0/16" \
          --debug
      - echo "~~~ Install project calico - CNI"
      - |
        helm repo add projectcalico https://docs.tigera.io/calico/charts \
          --kubeconfig={{.CLUSTER_KUBECONFIG_PATH}}
      - |
        helm install calico projectcalico/tigera-operator \
          --kubeconfig={{.CLUSTER_KUBECONFIG_PATH}} \
          -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/values.yaml \
          --namespace tigera-operator --create-namespace
      - echo ~~~ Installing azure disk csi driver
      - curl -skSLo install-driver.sh https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/v1.28.1/deploy/install-driver.sh
      - chmod +x ./install-driver.sh
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} ./install-driver.sh v1.28.1 snapshot
      - rm ./install-driver.sh
      - echo ~~~ Installing azure storage class
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl create -f https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/storageclass-azuredisk-csi.yaml
      - echo ~~~ Wait for all nodes to become ready
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl get pod -A
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl get nodes
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl wait --for=condition=ready nodes --all --timeout=10m
      - KUBECONFIG={{.CLUSTER_KUBECONFIG_PATH}} kubectl get nodes
      - echo ~~~ Azure K8S cluster created
  capi-delete:
    internal: true
    cmds:
      - kubectl delete -f {{.CLUSTERFILE}} --wait

  capi-delete-eks:
    env:
      AWS_B64ENCODED_CREDENTIALS:
        sh: "{{.BINDIR}}/clusterawsadm bootstrap credentials encode-as-profile 2> /dev/null"
    cmds:
      - "{{.BINDIR}}/clusterawsadm controller update-credentials"
      - helm uninstall --wait -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} --timeout 60m

  capi-delete-gke:
    deps:
      - tool:kubectl
      - tool:helm
    env:
      GCP_B64ENCODED_CREDENTIALS: "{{.GCP_B64ENCODED_CREDENTIALS}}"
    cmds:
      - kubectl delete cluster {{.CLUSTER_NAME}}-gke-cluster --timeout=20m
      - helm uninstall --wait -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} --timeout 10m --debug

  capi-delete-aks:
    deps:
      - tool:helm
    cmds:
      - helm uninstall --wait -n {{.CLUSTER_NAMESPACE}} {{.CLUSTER_NAME}} --timeout 60m --debug
  # Use this when running locally rather than in CI.
  gke-auth-login:
    deps:
      - tool:gcloud
    env:
      PROJECT_ID: "{{.PROJECT_ID}}"
      GOOGLE_APPLICATION_CREDENTIALS: "{{.GOOGLE_APPLICATION_CREDENTIALS}}"
    cmds:
      - gcloud auth activate-service-account --key-file={{.GOOGLE_APPLICATION_CREDENTIALS}}
      - gcloud config set project {{.PROJECT_ID}} --quiet
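
  # An illustrative local invocation (the project ID and key-file path are
  # placeholders, not values defined in this repo):
  #
  #   task gke-auth-login PROJECT_ID=my-project \
  #     GOOGLE_APPLICATION_CREDENTIALS=$HOME/gcp-sa.json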
  gke-get-kubeconfig:
    deps:
      - tool:gcloud
      - tool:auth-gcp
      - tool:gcloud-auth-plugin
    env:
      GOOGLE_APPLICATION_CREDENTIALS: "{{.GOOGLE_APPLICATION_CREDENTIALS}}"
      PROJECT_ID: "{{.PROJECT_ID}}"
      USE_GKE_GCLOUD_AUTH_PLUGIN: True
      KUBECONFIG: '{{ default "capi-gke-cluster.conf" .KUBECONFIG}}'
    cmds:
      - gcloud container clusters get-credentials {{.CLUSTER_NAME}}-gke-cluster --region=us-west1 --project {{.PROJECT_ID}}