-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathMakefile
371 lines (339 loc) · 17.8 KB
/
Makefile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
# Set environment variables (exported so child processes — kind, helm, kubectl,
# argocd, trivy — see them). User-tunable knobs use ?= so they can be overridden
# from the environment or the make command line.
export CLUSTER_NAME?=keptn
export CILIUM_VERSION?=1.11.7
export CERT_MANAGER_CHART_VERSION?=1.9.1
export ARGOCD_CHART_VERSION?=4.10.9
export KEPTN_VERSION?=0.13.6
# Set to 1 to scan pulled images with trivy before loading them into the cluster.
export TRIVY_IMAGE_CHECK?=0
# NOTE: make does not strip quotes from assignment values — quoting the RHS would
# make the literal quote characters part of the exported variable and break the
# argocd CLI's parsing of ARGOCD_OPTS, so the value is left unquoted here.
export ARGOCD_OPTS=--grpc-web --insecure --server argocd.127.0.0.1.nip.io
# kind image list
# kindest/node:v1.22.7@sha256:1dfd72d193bf7da64765fd2f2898f78663b9ba366c2aa74be1fd7498a1873166
# kindest/node:v1.23.5@sha256:a69c29d3d502635369a5fe92d8e503c09581fcd406ba6598acc5d80ff5ba81b1
# kindest/node:v1.24.1@sha256:fd82cddc87336d91aa0a2fc35f3c7a9463c53fd8e9575e9052d2c75c61f5b083
export KIND_NODE_IMAGE=kindest/node:v1.24.2@sha256:1f0cee2282f43150b52dc7933183ed96abdcfc8d293f30ec07082495874876f1
# Aggregate target: create the kind cluster, point kubectl at it, install the
# prerequisite CRDs, then bring up Cilium (CNI), Argo CD and the nginx ingress.
# NOTE(review): prerequisite order matters here — run without -j.
.PHONY: kind-basic
kind-basic: kind-create kx-kind kind-install-crds cilium-prepare-images cilium-install argocd-deploy nginx-ingress-deploy
# Aggregate target: everything from kind-basic plus the monitoring stack and Keptn.
.PHONY: kind-keptn
kind-keptn: kind-basic prometheus-stack-deploy keptn-prepare-images keptn-deploy
# Create the kind cluster $(CLUSTER_NAME) from kind/kind-config.yaml, staging the
# kubeadm patches under /tmp where the kind config expects to mount them from.
.PHONY: kind-create
kind-create:
ifeq ($(TRIVY_IMAGE_CHECK), 1)
	# optional vulnerability scan of the node image (--exit-code=0: report only, never fails)
	trivy image --severity=HIGH --exit-code=0 "$(KIND_NODE_IMAGE)"
endif
	# change resources for control plane pods
	# https://github.com/kubernetes/kubeadm/pull/2184/files
	mkdir -p /tmp/kind/kubeadm-patches
	cp -a kind/kubeadm-patches/* /tmp/kind/kubeadm-patches
	#
	kind --version
	kind create cluster --name "$(CLUSTER_NAME)" \
		--config="kind/kind-config.yaml" \
		--image="$(KIND_NODE_IMAGE)"
	# for more control planes, but no workers
	# kubectl taint nodes --all node-role.kubernetes.io/master- || true
.PHONY: kind-delete
# Tear the kind cluster down completely.
kind-delete:
	kind delete cluster --name "$(CLUSTER_NAME)"
.PHONY: kx-kind
# Point the current kubeconfig context at the kind cluster.
kx-kind:
	kind export kubeconfig --name "$(CLUSTER_NAME)"
# Install CRDs that later deployments expect to already exist: prometheus-operator's
# ServiceMonitor CRD plus the two Istio CRDs that Keptn references.
# NOTE(review): the ServiceMonitor CRD URL tracks the main branch (unpinned) — a
# prometheus-community repo restructure could break this; consider pinning a tag.
.PHONY: kind-install-crds
kind-install-crds:
	# fix prometheus-operator's CRDs
	kubectl apply -f https://raw.githubusercontent.com/prometheus-community/helm-charts/refs/heads/main/charts/kube-prometheus-stack/charts/crds/crds/crd-servicemonitors.yaml
	# for keptn
	kubectl apply -f keptn/crd-istio-destinationrules.yaml \
		-f keptn/crd-istio-virtualservices.yaml
	# https://raw.githubusercontent.com/keptn-sandbox/keptn-in-a-box/master/resources/istio/public-gateway.yaml
# Images needed by the Cilium chart — kept in a single list so the pull, scan and
# load steps can never drift out of sync (the original repeated the list 3×).
CILIUM_IMAGES := \
	quay.io/cilium/cilium:v$(CILIUM_VERSION) \
	quay.io/cilium/hubble-ui:v0.8.5 \
	quay.io/cilium/hubble-ui-backend:v0.8.5 \
	quay.io/cilium/hubble-relay:v$(CILIUM_VERSION) \
	docker.io/envoyproxy/envoy:v1.18.4@sha256:e5c2bb2870d0e59ce917a5100311813b4ede96ce4eb0c6bfa879e3fbe3e83935

# Pull the Cilium images locally (optionally trivy-scanning them) and pre-load
# them into the kind cluster so the chart installs without registry access.
.PHONY: cilium-prepare-images
cilium-prepare-images:
	# pull images locally; `|| exit 1` keeps the fail-fast behavior of the
	# original one-command-per-recipe-line layout
	for img in $(CILIUM_IMAGES); do docker pull "$$img" || exit 1; done
ifeq ($(TRIVY_IMAGE_CHECK), 1)
	# report-only scan (--exit-code=0 never fails on findings, only on scan errors)
	for img in $(CILIUM_IMAGES); do trivy image --severity=HIGH --exit-code=0 "$$img" || exit 1; done
endif
	# Load the images onto the cluster
	for img in $(CILIUM_IMAGES); do kind load docker-image --name "$(CLUSTER_NAME)" "$$img" || exit 1; done
# Install/upgrade Cilium as the CNI via Helm (interactive flavor: Hubble UI plus
# service monitors). --wait blocks until the rollout is healthy.
.PHONY: cilium-install
cilium-install:
	# Add the Cilium repo
	helm repo add cilium https://helm.cilium.io/
	# install/upgrade the chart; later -f files override earlier ones, so keep order
	helm upgrade --install cilium cilium/cilium --version $(CILIUM_VERSION) \
		-f kind/kind-values-cilium.yaml \
		-f kind/kind-values-cilium-hubble.yaml \
		-f kind/kind-values-cilium-service-monitors.yaml \
		--namespace kube-system \
		--wait
# CI flavor of cilium-install: swaps the Hubble UI values file for explicit
# resource limits (kind-values-cilium-resources.yaml); otherwise identical.
.PHONY: cilium-install-ci
cilium-install-ci:
	# Add the Cilium repo
	helm repo add cilium https://helm.cilium.io/
	# install/upgrade the chart; later -f files override earlier ones, so keep order
	helm upgrade --install cilium cilium/cilium --version $(CILIUM_VERSION) \
		-f kind/kind-values-cilium.yaml \
		-f kind/kind-values-cilium-resources.yaml \
		-f kind/kind-values-cilium-service-monitors.yaml \
		--namespace kube-system \
		--wait
# Images required by the cert-manager chart — one list keeps the pull and load
# steps in sync (the original repeated the four image names twice).
CERT_MANAGER_IMAGES := \
	quay.io/jetstack/cert-manager-controller:v$(CERT_MANAGER_CHART_VERSION) \
	quay.io/jetstack/cert-manager-webhook:v$(CERT_MANAGER_CHART_VERSION) \
	quay.io/jetstack/cert-manager-cainjector:v$(CERT_MANAGER_CHART_VERSION) \
	quay.io/jetstack/cert-manager-ctl:v$(CERT_MANAGER_CHART_VERSION)

# Pre-load the cert-manager images into the kind cluster, then install the chart.
.PHONY: cert-manager-deploy
cert-manager-deploy:
	# prepare image(s); `|| exit 1` preserves fail-fast per image
	for img in $(CERT_MANAGER_IMAGES); do docker pull "$$img" || exit 1; done
	for img in $(CERT_MANAGER_IMAGES); do kind load docker-image --name "$(CLUSTER_NAME)" "$$img" || exit 1; done
	#
	helm repo add cert-manager https://charts.jetstack.io
	# $(VAR) form used for consistency with the rest of the file (was ${VAR})
	helm upgrade --install \
		cert-manager cert-manager/cert-manager \
		--version "v$(CERT_MANAGER_CHART_VERSION)" \
		--namespace cert-manager \
		--create-namespace \
		--values kind/cert-manager.yaml \
		--wait
# Install/upgrade Argo CD via the community Helm chart, then apply the extra CRD
# manifests kept in the repo. Unlike the other deploy targets, images are pulled
# by the cluster directly (the pre-load lines below are kept commented out).
.PHONY: argocd-deploy
argocd-deploy:
	# prepare image(s)
	# docker pull quay.io/argoproj/argocd:v2.3.4
	# docker pull quay.io/argoproj/argocd-applicationset:v0.4.1
	# docker pull redis:6.2.6-alpine
	# docker pull bitnami/redis-exporter:1.26.0-debian-10-r2
	# kind load docker-image --name $(CLUSTER_NAME) quay.io/argoproj/argocd:v2.3.4
	# kind load docker-image --name $(CLUSTER_NAME) quay.io/argoproj/argocd-applicationset:v0.4.1
	# kind load docker-image --name $(CLUSTER_NAME) redis:6.2.6-alpine
	# kind load docker-image --name $(CLUSTER_NAME) bitnami/redis-exporter:1.26.0-debian-10-r2
	# install
	helm repo add argo https://argoproj.github.io/argo-helm
	# later -f files override earlier ones; keep order
	helm upgrade --install \
		argocd-single \
		argo/argo-cd \
		--namespace argocd \
		--create-namespace \
		--version "${ARGOCD_CHART_VERSION}" \
		-f kind/kind-values-argocd.yaml \
		-f kind/kind-values-argocd-service-monitors.yaml \
		--wait
	# update CRDs
	kubectl -n argocd apply -f argocd/argo-cd-crds.yaml
	# retrieve the generated admin password:
	# kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo ""
.PHONY: nginx-ingress-deploy
# Pre-load the ingress-nginx images into the kind cluster, then register the
# Argo CD application (and the Gateway API CRDs) that deploys the controller.
nginx-ingress-deploy:
	docker pull registry.k8s.io/ingress-nginx/controller:v1.3.0
	kind load docker-image --name "$(CLUSTER_NAME)" registry.k8s.io/ingress-nginx/controller:v1.3.0
	docker pull registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.1.1
	kind load docker-image --name "$(CLUSTER_NAME)" registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.1.1
	# hand the ingress controller over to Argo CD
	kubectl -n argocd apply -f argocd/nginx-ingress.yaml
	kubectl -n argocd apply -f argocd/gateway-api-crds.yaml
.PHONY: metrics-server-deploy
# Register the system-kube Argo CD project, then the metrics-server application
# (single apply; -f files are processed in order, project first).
metrics-server-deploy:
	kubectl -n argocd apply \
		-f argocd/projects/system-kube.yaml \
		-f argocd/metrics-server.yaml
# Deploy the kube-prometheus stack via Argo CD: project, then CRDs, then the
# stack and adapter applications.
.PHONY: prometheus-stack-deploy
prometheus-stack-deploy:
	# projects
	kubectl -n argocd apply -f argocd/projects/system-monitoring.yaml
	# (update) CRDs
	kubectl -n argocd apply -f argocd/prometheus-stack-crds.yaml
	# NOTE(review): fixed sleep — presumably gives the API server time to register
	# the CRDs before the stack app references them; confirm and consider
	# `kubectl wait --for=condition=Established crd/...` instead.
	sleep 10
	#monitoring
	kubectl -n argocd apply -f argocd/prometheus-stack.yaml
	kubectl -n argocd apply -f argocd/prometheus-adapter.yaml
.PHONY: starboard-deploy
# Register the security-starboard Argo CD project, then its application
# (single apply; -f files are processed in order, project first).
starboard-deploy:
	kubectl -n argocd apply \
		-f argocd/projects/security-starboard.yaml \
		-f argocd/security-starboard.yaml
# Images required by the Keptn control plane and its integrations — one list so
# the pull and load steps can never drift apart (the original repeated it twice).
KEPTN_IMAGES := \
	docker.io/bitnami/mongodb:4.4.13-debian-10-r33 \
	docker.io/bitnami/mongodb-exporter:0.31.1-debian-10-r4 \
	docker.io/keptn/distributor:$(KEPTN_VERSION) \
	docker.io/keptn/mongodb-datastore:$(KEPTN_VERSION) \
	docker.io/keptn/bridge2:$(KEPTN_VERSION) \
	nats:2.1.9-alpine3.12 \
	synadia/prometheus-nats-exporter:0.5.0 \
	docker.io/keptn/shipyard-controller:$(KEPTN_VERSION) \
	docker.io/keptn/jmeter-service:$(KEPTN_VERSION) \
	docker.io/keptn/helm-service:$(KEPTN_VERSION) \
	keptncontrib/prometheus-service:0.7.5 \
	keptncontrib/argo-service:0.9.3

# Pull all Keptn images locally (optionally trivy-scanning them) and pre-load
# them into the kind cluster.
.PHONY: keptn-prepare-images
keptn-prepare-images:
	# pull images locally; `|| exit 1` keeps the original fail-fast behavior
	for img in $(KEPTN_IMAGES); do docker pull "$$img" || exit 1; done
ifeq ($(TRIVY_IMAGE_CHECK), 1)
	# Scan lines kept verbatim rather than looped: their exit codes differ per image.
	# NOTE(review): some images fail the build on HIGH findings (--exit-code=1) while
	# others only report (--exit-code=0) — confirm this asymmetry is still intended.
	trivy image --severity=HIGH --exit-code=0 docker.io/bitnami/mongodb:4.4.13-debian-10-r33
	trivy image --severity=HIGH --exit-code=0 docker.io/bitnami/mongodb-exporter:0.31.1-debian-10-r4
	trivy image --severity=HIGH --exit-code=1 docker.io/keptn/distributor:$(KEPTN_VERSION)
	trivy image --severity=HIGH --exit-code=0 docker.io/keptn/mongodb-datastore:$(KEPTN_VERSION)
	trivy image --severity=HIGH --exit-code=0 docker.io/keptn/bridge2:$(KEPTN_VERSION)
	trivy image --severity=HIGH --exit-code=0 nats:2.1.9-alpine3.12
	trivy image --severity=HIGH --exit-code=1 synadia/prometheus-nats-exporter:0.5.0
	trivy image --severity=HIGH --exit-code=1 docker.io/keptn/shipyard-controller:$(KEPTN_VERSION)
	trivy image --severity=HIGH --exit-code=0 docker.io/keptn/jmeter-service:$(KEPTN_VERSION)
	trivy image --severity=HIGH --exit-code=0 docker.io/keptn/helm-service:$(KEPTN_VERSION)
	trivy image --severity=HIGH --exit-code=0 keptncontrib/prometheus-service:0.7.5
	trivy image --severity=HIGH --exit-code=0 keptncontrib/argo-service:0.9.3
endif
	# Load the images onto the cluster
	for img in $(KEPTN_IMAGES); do kind load docker-image --name "$(CLUSTER_NAME)" "$$img" || exit 1; done
# Best-effort teardown of the Keptn Argo CD applications, then the namespace.
.PHONY: keptn-delete
keptn-delete:
	kubectl -n argocd delete -f argocd/keptn-nats.yaml || true
	kubectl -n argocd delete -f argocd/keptn-mongodb.yaml || true
	kubectl -n argocd delete -f argocd/keptn.yaml || true
	# fix: the previous `-R` flag is only valid together with a `-f` directory
	# argument and did nothing here; --ignore-not-found keeps this best-effort
	# (matching the `|| true` lines above) without masking real API errors.
	kubectl delete ns keptn --ignore-not-found
# Deploy Keptn through Argo CD: argo-rollouts, the system-keptn project, then the
# nats/mongodb/keptn applications. The large commented block below is the earlier
# manual Helm-based install path, kept for reference.
.PHONY: keptn-deploy
keptn-deploy:
	# pod-security labels for the keptn namespace — currently disabled
	# kubectl label --overwrite ns keptn \
	# pod-security.kubernetes.io/enforce=baseline \
	# pod-security.kubernetes.io/enforce-version=latest \
	# pod-security.kubernetes.io/warn=restricted \
	# pod-security.kubernetes.io/warn-version=latest \
	# pod-security.kubernetes.io/audit=restricted \
	# pod-security.kubernetes.io/audit-version=latest
	kubectl -n argocd apply -f argocd/argo-rollouts.yaml
	kubectl -n argocd apply -f argocd/projects/system-keptn.yaml
	kubectl -n argocd apply -f argocd/keptn-nats.yaml
	kubectl -n argocd apply -f argocd/keptn-mongodb.yaml
	kubectl -n argocd apply -f argocd/keptn.yaml
	# --- legacy direct-Helm install path (pre-GitOps), kept for reference ---
	# helm repo add keptn https://charts.keptn.sh
	# helm upgrade --install \
	# keptn keptn/keptn \
	# -n keptn \
	# --create-namespace \
	# --wait \
	# -f kind/kind-values-keptn.yaml
	# helm upgrade --install \
	# helm-service \
	# https://github.com/keptn/keptn/releases/download/$(KEPTN_VERSION)/helm-service-$(KEPTN_VERSION).tgz \
	# -n keptn
	# helm upgrade --install \
	# jmeter-service https://github.com/keptn/keptn/releases/download/0.8.4/jmeter-service-0.8.4.tgz \
	# -n keptn
	# helm upgrade --install \
	# -n keptn \
	# prometheus-service \
	# https://github.com/keptn-contrib/prometheus-service/releases/download/0.7.2/prometheus-service-0.7.2.tgz \
	# --set=prometheus.endpoint="http://prometheus-stack-kube-prom-prometheus.monitoring.svc.cluster.local:9090"
	# helm upgrade --install \
	# -n keptn \
	# argo-service \
	# https://github.com/keptn-contrib/argo-service/releases/download/0.9.1/argo-service-0.9.1.tgz
	# #
	# kubectl apply -n monitoring \
	# -f https://raw.githubusercontent.com/keptn-contrib/prometheus-service/0.7.2/deploy/role.yaml
.PHONY: keptn-gitops-operator-deploy
# Register the system-keptn Argo CD project, then the keptn-gitops-operator app
# (single apply; -f files are processed in order, project first).
keptn-gitops-operator-deploy:
	kubectl -n argocd apply \
		-f argocd/projects/system-keptn.yaml \
		-f argocd/keptn-gitops-operator.yaml
# Set the Keptn bridge credentials to admin/admin, restart the bridge so it
# picks them up, and authenticate the local keptn CLI against it.
.PHONY: keptn-set-login
keptn-set-login:
	# create-or-replace the secret idempotently (client-side dry-run piped to replace)
	kubectl create secret -n keptn generic bridge-credentials --from-literal="BASIC_AUTH_USERNAME=admin" --from-literal="BASIC_AUTH_PASSWORD=admin" -oyaml --dry-run=client | kubectl replace -f -
	kubectl -n keptn rollout restart deployment bridge
	keptn auth -n keptn --endpoint="http://bridge.127.0.0.1.nip.io"
	# alternative: keptn configure bridge --action=expose
# Onboard the podtato-head demo: create the project/service, attach the chart,
# SLI/SLO quality-gate resources and JMeter load tests, enable Prometheus
# monitoring, then trigger the first delivery. Step order matters throughout.
.PHONY: keptn-create-project-podtato-head
keptn-create-project-podtato-head:
	keptn create project podtato-head --shipyard=keptn/podtato-head/shipyard.yaml
	keptn create service helloservice --project=podtato-head
	# chart packaged by keptn-helloserver-prepare-helm-charts
	keptn add-resource --project=podtato-head --service=helloservice --all-stages --resource=./helm/helloservice.tgz
	echo "Adding keptn quality-gates to project podtato-head"
	keptn add-resource --project=podtato-head --stage=dev --service=helloservice --resource=keptn/podtato-head/prometheus/sli.yaml --resourceUri=prometheus/sli.yaml
	keptn add-resource --project=podtato-head --stage=dev --service=helloservice --resource=keptn/podtato-head/slo.yaml --resourceUri=slo.yaml
	#
	echo "Adding jmeter load tests to project podtato-head"
	keptn add-resource --project=podtato-head --stage=dev --service=helloservice --resource=keptn/podtato-head/jmeter/load.jmx --resourceUri=jmeter/load.jmx
	keptn add-resource --project=podtato-head --stage=dev --service=helloservice --resource=keptn/podtato-head/jmeter/jmeter.conf.yaml --resourceUri=jmeter/jmeter.conf.yaml
	echo "enable prometheus monitoring"
	keptn configure monitoring prometheus --project=podtato-head --service=helloservice
	echo "trigger delivery"
	keptn trigger delivery --project=podtato-head --service=helloservice \
		--image ghcr.io/podtato-head/podtatoserver:v0.1.1 \
		--values "replicaCount=2" \
		--values "serviceMonitor.enabled=true" \
		--values "serviceMonitor.interval=5s" --values "serviceMonitor.scrapeTimeout=5s"
	#
	# manual quality-gate evaluation:
	# keptn trigger evaluation --project=podtato-head --service=helloservice --stage=dev --timeframe=5m
.PHONY: keptn-deploy-correct-version-podtato-head
# Trigger delivery of podtatoserver v0.1.1 (the known-good build) with two
# replicas and the ServiceMonitor scraping every 5s.
keptn-deploy-correct-version-podtato-head:
	keptn trigger delivery \
		--project=podtato-head \
		--service=helloservice \
		--image ghcr.io/podtato-head/podtatoserver:v0.1.1 \
		--values "replicaCount=2" \
		--values "serviceMonitor.enabled=true" \
		--values "serviceMonitor.interval=5s" \
		--values "serviceMonitor.scrapeTimeout=5s"
# Trigger delivery of podtatoserver v0.1.2.
# NOTE(review): per the target name this appears to be the deliberately "slow"
# build used to trip the quality gate — confirm against the demo scenario.
.PHONY: keptn-deploy-slow-version-podtato-head
keptn-deploy-slow-version-podtato-head:
	keptn trigger delivery --project=podtato-head --service=helloservice \
		--image="ghcr.io/podtato-head/podtatoserver" --tag=v0.1.2
# Package the helloserver chart into helm/ and rename the versioned archive to
# the fixed name (helm/helloservice.tgz) that `keptn add-resource` expects.
.PHONY: keptn-helloserver-prepare-helm-charts
keptn-helloserver-prepare-helm-charts:
	# yq reads the file directly — no `cat | yq … -` pipeline or `tr -d '\n'`
	# needed; $$(...) passes a literal $(...) command substitution to the shell
	helm package ./helm/helloserver/ -d helm && \
		mv "helm/helloserver-$$(yq eval '.version' helm/helloserver/Chart.yaml).tgz" helm/helloservice.tgz
# Repackage the helloserver chart, upload it to Keptn, then redeploy the
# known-good version.
.PHONY: keptn-redeploy-chart-podtato-head
keptn-redeploy-chart-podtato-head:
	# fix: use $(MAKE) instead of literal `make` so -n/-j and the jobserver
	# propagate to the sub-make invocations
	$(MAKE) keptn-helloserver-prepare-helm-charts && \
	keptn add-resource --project=podtato-head --service=helloservice --all-stages --resource=./helm/helloservice.tgz && \
	$(MAKE) keptn-deploy-correct-version-podtato-head
.PHONY: keptn-delete-project-podtato-head
# Drop the podtato-head Keptn project and its stage namespaces (best-effort:
# missing namespaces are tolerated).
keptn-delete-project-podtato-head:
	keptn delete project podtato-head
	kubectl delete ns podtato-head-dev podtato-head-prod || true
	# per-service alternative: keptn delete service helloservice -p podtato-head
# Onboard the sockshop demo: create the project/service, attach load tests and
# quality-gate resources, enable Prometheus monitoring, then register the
# carts-prod Argo CD application (no auto-sync) from the keptn/examples repo.
.PHONY: keptn-create-project-sockshop
keptn-create-project-sockshop:
	keptn create project sockshop --shipyard=keptn/sockshop/shipyard.yaml
	keptn create service carts --project=sockshop
	keptn add-resource --project=sockshop --stage=prod --service=carts --resource=keptn/sockshop/jmeter/load.jmx --resourceUri=jmeter/load.jmx
	keptn add-resource --project=sockshop --stage=prod --service=carts --resource=keptn/sockshop/slo-quality-gates.yaml --resourceUri=slo.yaml
	keptn configure monitoring prometheus --project=sockshop --service=carts
	keptn add-resource --project=sockshop --stage=prod --service=carts --resource=keptn/sockshop/sli-config-argo-prometheus.yaml --resourceUri=prometheus/sli.yaml
	#
	# --sync-policy none: Keptn's argo-service drives the sync, not Argo CD
	argocd app create --name carts-prod \
		--repo https://github.com/keptn/examples.git --dest-server https://kubernetes.default.svc \
		--dest-namespace sockshop-prod --path onboarding-carts/argo/carts --revision 0.11.0 \
		--sync-policy none
.PHONY: test-network-apply-assets
# Create the test-network namespace if it is missing, then apply the podinfo,
# client and networkpolicy fixtures used by the network tests.
test-network-apply-assets:
	kubectl get ns test-network >/dev/null 2>&1 || kubectl create ns test-network
	kubectl apply -n test-network -k tests/assets/k8s/podinfo --wait=true
	kubectl apply -n test-network -f tests/assets/k8s/client --wait=true
	kubectl apply -n test-network -f tests/assets/k8s/networkpolicy --wait=true
# Smoke-test connectivity through the network policy: curl podinfo from inside
# the client pod; fails if the request is blocked.
.PHONY: test-network-check-status
test-network-check-status:
	# linkerd-based alternatives, kept for reference:
	# linkerd top deployment/podinfo --namespace test-network
	# linkerd tap deployment/client --namespace test-network
	kubectl exec -n test-network deploy/client -c client -- curl -s podinfo:9898
# Run the e2e suite in tests/e2e via `go test`.
# NOTE(review): target name suggests the suite is written with ginkgo, driven
# through the standard go test runner — confirm against tests/e2e.
.PHONY: run-ginkgo
run-ginkgo:
	cd tests/e2e && go test