
Commit 42e4820

Add fixes to CI
Without this, 1.24 is deploying 1.25, and the CI would actually fail.
1 parent 994c3c9 commit 42e4820
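For context, the mismatch came from e2e/KindConfig/kind-cluster-1.24.yaml pulling kindest/node:v1.25.11 images, so the 1.24 matrix entry was really exercising a 1.25 cluster (fixed in the diff below). A minimal sketch of a local guard against that kind of drift, assuming the e2e/KindConfig layout used in this repo; the check itself is not part of this commit:

    # Hypothetical sanity check (not in this commit): every kind config must
    # reference node images that match the Kubernetes version in its filename.
    for v in 1.21 1.23 1.24 1.25 1.26; do
      cfg="e2e/KindConfig/kind-cluster-$v.yaml"
      grep -q "kindest/node:v$v" "$cfg" || { echo "MISMATCH in $cfg"; exit 1; }
    done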

7 files changed: +51, -46 lines


.github/workflows/e2eCI.yaml

Lines changed: 12 additions & 16 deletions
@@ -12,6 +12,7 @@ jobs:
         - 1.21
         - 1.23
         - 1.24
+        - 1.25
         - 1.26
     steps:
       - name: checkout
@@ -21,36 +22,31 @@ jobs:
         uses: actions/setup-go@v4
         with:
          go-version: '1.20'
-
+
+      - name: Export build version
+        run: echo "kotary_image=kotary:${{ github.sha }}" >> "$GITHUB_ENV"
+
       - name: Docker Build image
-        run: |
-          docker build -t "ci/kotary:${{ matrix.kubernetes }}" .
-          docker image ls | grep ci
+        run: docker build -t $kotary_image .

       - name: Create cluster KinD
         uses: helm/kind-action@v1.5.0
         with:
           config: e2e/KindConfig/kind-cluster-${{ matrix.kubernetes }}.yaml

       - name: testing cluster kinD
-        run: |
-          kubectl cluster-info --context kind-chart-testing echo " current-context:" $(kubectl config current-context)
-          kubectl get all --all-namespaces
-
+        run: kubectl get pods --all-namespaces
+
       - name: Load docker image into kind cluster
-        run: kind load docker-image "ci/kotary:${{ matrix.kubernetes }}" --name chart-testing
-
-      - name: Set GOROOT
-        run: echo "export GOROOT=/opt/hostedtoolcache/go/1.20/x64" >> $GITHUB_ENV
-
+        run: kind load docker-image "$kotary_image"
+
       - name: Deploy CRD
         run: kubectl apply -f artifacts/crd.yml

       - name: Edit kotary deployement
         run: |
-          version=${{ matrix.kubernetes }}
-          sed -i -E -e "s/cagip\/kotary:v[0-9.]+/ci\/kotary:$version/g" artifacts/deployment.yml -e "s/Always/Never/g" artifacts/deployment.yml;
+          sed -i -E -e "s#cagip/kotary:v[0-9.]+#$kotary_image#g" artifacts/deployment.yml -e "s#Always#Never#g" artifacts/deployment.yml;
           cat artifacts/deployment.yml

       - name: run tests
-        run: ./e2e/e2e.sh
+        run: ./e2e/e2e.sh

.gitignore

Lines changed: 2 additions & 1 deletion
@@ -22,4 +22,5 @@ vendor/
 *.njsproj
 *.sln
 coverage.html
-coverage.out
+coverage.out
+temp.json

artifacts/deployment.yml

Lines changed: 2 additions & 2 deletions
@@ -56,8 +56,8 @@ spec:
       serviceAccountName: kotary
       containers:
       - name: kotary
-        image: cagip/kotary:v0.24.0
-        imagePullPolicy: Always
+        image: ci/kotary:1.25-994c3c9fa25722f7da25a1d070af61c5333b6f25
+        imagePullPolicy: Never
         envFrom:
         - configMapRef:
             name: kotary-config

cmd/main.go

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ var (
 	kubeconfig string
 )

-const resyncPeriod = time.Minute * 30
+const resyncPeriod = time.Second * 10

 func main() {
 	flag.StringVar(&kubeconfig, "kubeconfig", defaultKubeconfig(), "Path to a kubeconfig. Only required if out-of-cluster.")

e2e/KindConfig/kind-cluster-1.24.yaml

Lines changed: 4 additions & 4 deletions
@@ -2,10 +2,10 @@ kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
 - role: control-plane
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"
 - role: worker
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"
 - role: worker
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"
 - role: worker
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"

e2e/KindConfig/kind-cluster-1.25.yaml

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  image: "kindest/node:v1.25.0"
+- role: worker
+  image: "kindest/node:v1.25.0"
+- role: worker
+  image: "kindest/node:v1.25.0"
+- role: worker
+  image: "kindest/node:v1.25.0"

e2e/e2e.sh

Lines changed: 19 additions & 22 deletions
@@ -2,13 +2,11 @@

 #
 # A FEW INFORMATIONS ABOUT THIS SCRIPT
-#
-# This script is used to test the kotary operator, it replicates what a really
-# simple a basic usage of kotary should look like.
-# This script is not an exaustive list of test of the operator, it is closer to
-# an end to end test because each test depends of the previous one and fails one the first error encountered.
-#
-# This test should be used to verify that every basic features of kotary is working fine.
+#
+# This script is used to test the kotary operator, it replicates basic usage of kotary.
+# It sole purpose is to test end to end. (Not unit/perf/...) testing.
+# Sadly, this is a single scenario, and all the steps are necessary.
+#
 # It is not meant to be used to debug a particular part of the operator.
 #
 # HOW THIS SCRIPT WORKS:
@@ -17,10 +15,10 @@
 # (if you are using kind don't forget to load you image in the cluster)
 # Then it goes into a series of tests described in the code below.
 # If any error occurs or on any unwanted behavior, the script ends and starts the CleanUp function
-# to remoove what have been used during the test.
+# to remove what have been used during the test.
 # Note that you can uncomment some lines in the CleanUp function depending of your needs.
 # If everything goes as intended the script will exit with a code 0 and cleanup the evironment.
-#
+#
 # /!\ This script is in no way perfect, feel free to add new tests at the end of the script if you
 # believe that the script needs some more coverage.
 #
@@ -57,7 +55,7 @@ CleanUp () {
     rm temp.json
 }

-trap CleanUp EXIT ERR
+#trap CleanUp EXIT ERR

 echo -e "${BLUE}====== Starting SetUp ======${NC} \\n"

@@ -75,9 +73,9 @@ while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null
 #This is the test part
 echo -e "\\n${BLUE}====== Starting Tests ======${NC}\\n"

-#Trying to apply a rqc and verify that the claim is accepted (an accepted claim is deleted from the queue so it does not return anything)
+#Trying to apply a rqc and verify that the claim is accepted (an accepted claim is deleted from the queue so it does not return anything)
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
-kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
+kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
 phase=$(jq ' .items[].status.phase' temp.json) #get the status of the claim if the claim has been accepted $phase will be empty
 if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
 then echo -e "\\n${RED}FAILLED! error durring Claim test: the Claim is $phase. Should be accepted ${NC}" && exit 1 ; fi
@@ -92,23 +90,23 @@ kubectl apply -f $ROOT/e2e/KindConfig/pod3.yml -n $NS
 kubectl apply -f $ROOT/e2e/KindConfig/pod4.yml -n $NS
 echo -e "\\n ${PURPLE}Should be 'cpu: 500m/660m, memory: 1000Mi/1Gi'${NC}"
 if ! kubectl get resourcequota -n $NS | grep "cpu: 500m/660m, memory: 1000Mi/1Gi";
-then echo -e "\\n${RED}FAILLED! Error, the expected specs are not the same as the actual ones.${NC}" && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! Error, the expected specs are not the same as the actual ones.${NC}" && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Verify that trying to add a pod with resources exceeding what is left to use results in an error
 echo -e "\\n ${PURPLE}-- Trying to add a pod over max ressources (must be forbidden) --${NC}" && sleep 3
 if kubectl apply -f $ROOT/e2e/KindConfig/pod5.yml -n $NS ; # if the command does NOT result in an error then the test fails
-then echo -e "\\n${RED}FAILLED! error durring Pod test: The pod must not be accepted because it uses more ressources than what's left to use.${NC}" && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during Pod test: The pod must not be accepted because it uses more ressources than what's left to use.${NC}" && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Apply a new quotaclaim to scale up the resourses
 # verify that the claim is accepted (nothing should appear in the 'status' field)
 echo -e "\\n ${PURPLE}-- Scale UP --${NC}"
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimUp.yaml -n $NS && sleep 3 #apply the new rqc
-kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
+kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
 phase=$(jq ' .items[].status.phase' temp.json) #get the status of the claim if the claim has been accepted $phase will be empty
 if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
-then echo -e "\\n${RED}FAILLED! error durring Scale UP: the Claim is $phase ${NC}\\n" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during Scale UP: the Claim is $phase ${NC}\\n" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Apply a new quotaclaim to scale up the resourses but this claim is to big,
@@ -119,7 +117,7 @@ kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimToBig.yaml -n $NS && sleep 3
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "\"REJECTED\"" ]; #The claim MUST be rejected, else it is an error
-then echo -e "\\n${RED}FAILLED! error durring Scale UP(to big): the Claim has not been rejected${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during Scale UP (to big): the Claim has not been rejected${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n" && sleep 3

 # Apply a new quotaclaim to scale down the resourses,
@@ -130,19 +128,18 @@ kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimPending.yaml -n $NS && sleep
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "\"PENDING\"" ]; #The claim MUST be pending, else it is an error
-then echo -e "\\n${RED}FAILLED! error durring pending test: the Claim is not set to PENDING${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during pending test: the Claim is not set to PENDING${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Reduce the current usage of cpu and memory by deleting a pod
-echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}" && sleep 3
+echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}"
 kubectl delete pod -n $NS podtest-4 && sleep 3
-
 # assert that, after deletion of the pod, the 'pending' claim is now accepted
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "" ]; #The status must be empty because the claim should now be accepted. (remember: empty=accepted)
-then echo -e "\\n${RED}FAILLED! error durring pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1; fi
+then echo -e "\\n${RED}FAILED! error during pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1; fi
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
 echo -e "${GREEN} -- OK --${NC}\\n"

-echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"
+echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"
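Every accept/reject check in this script follows the same pattern: dump the ResourceQuotaClaim to temp.json and read .items[].status.phase with jq, where an empty phase means the claim was accepted. A minimal sketch of how that pattern could be factored into a helper, reusing the script's $QUOTACLAIM and $NS variables; the function itself is hypothetical and not part of the commit:

    # Hypothetical helper: print the phase of the claims in a namespace.
    # Empty output means the claim was accepted (accepted claims carry no status.phase).
    get_claim_phase() {
      kubectl get "$QUOTACLAIM" -n "$1" -o=json | jq -r '.items[].status.phase // empty'
    }

    # Usage mirroring the tests above:
    # [ -z "$(get_claim_phase "$NS")" ] && echo "claim accepted"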
