
Add fixes to CI
Without this, 1.24 is deploying 1.25, and the CI would actually
fail.
evrardjp committed Feb 29, 2024
1 parent 994c3c9 commit 39f0dd3
Showing 5 changed files with 48 additions and 43 deletions.
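Context for the fix: each matrix entry selects e2e/KindConfig/kind-cluster-${{ matrix.kubernetes }}.yaml, and the 1.24 config pinned every node to a v1.25 image, so the "1.24" job was really exercising a 1.25 cluster. Before this commit that config read (excerpt, quoted from the diff below):

    # e2e/KindConfig/kind-cluster-1.24.yaml, before this commit
    - role: control-plane
      image: "kindest/node:v1.25.11"   # a v1.25 node image in the 1.24 cluster config

The changes below correct that pinning, add a genuine 1.25 config, and tag the CI image per commit so the loaded image always matches the one built.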
28 changes: 12 additions & 16 deletions .github/workflows/e2eCI.yaml
@@ -12,6 +12,7 @@ jobs:
       - 1.21
       - 1.23
       - 1.24
+      - 1.25
       - 1.26
     steps:
     - name: checkout
@@ -21,36 +22,31 @@ jobs:
       uses: actions/setup-go@v4
       with:
         go-version: '1.20'
 
+    - name: Export build version
+      run: echo "kotary_image=kotary:${{ github.sha }}" >> "$GITHUB_ENV"
+
     - name: Docker Build image
-      run: |
-        docker build -t "ci/kotary:${{ matrix.kubernetes }}" .
-        docker image ls | grep ci
+      run: docker build -t $kotary_image .
 
     - name: Create cluster KinD
       uses: helm/kind-action@v1.5.0
       with:
         config: e2e/KindConfig/kind-cluster-${{ matrix.kubernetes }}.yaml
 
     - name: testing cluster kinD
-      run: |
-        kubectl cluster-info --context kind-chart-testing echo " current-context:" $(kubectl config current-context)
-        kubectl get all --all-namespaces
+      run: kubectl get pods --all-namespaces
 
     - name: Load docker image into kind cluster
-      run: kind load docker-image "ci/kotary:${{ matrix.kubernetes }}" --name chart-testing
-
-    - name: Set GOROOT
-      run: echo "export GOROOT=/opt/hostedtoolcache/go/1.20/x64" >> $GITHUB_ENV
-
+      run: kind load docker-image "$kotary_image"
 
     - name: Deploy CRD
       run: kubectl apply -f artifacts/crd.yml
 
     - name: Edit kotary deployement
       run: |
-        version=${{ matrix.kubernetes }}
-        sed -i -E -e "s/cagip\/kotary:v[0-9.]+/ci\/kotary:$version/g" artifacts/deployment.yml -e "s/Always/Never/g" artifacts/deployment.yml;
+        sed -i -E -e "s#cagip/kotary:v[0-9.]+#$kotary_image#g" artifacts/deployment.yml -e "s#Always#Never#g" artifacts/deployment.yml;
        cat artifacts/deployment.yml
     - name: run tests
-      run: ./e2e/e2e.sh
+      run: ./e2e/e2e.sh
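Taken together, the workflow now derives a single image tag from the commit SHA and reuses it for the build, load, and deploy steps. A rough local equivalent, for debugging outside Actions (a sketch: chart-testing is the cluster name used by the kind-action above, and running e2e.sh after editing the manifest mirrors the CI sequence):

    # Tag the image from the current commit, as the "Export build version" step does.
    kotary_image="kotary:$(git rev-parse HEAD)"
    docker build -t "$kotary_image" .

    # Make the image visible inside the kind cluster, then point the deployment
    # manifest at it and disable image pulling so the loaded copy is used.
    kind load docker-image "$kotary_image" --name chart-testing
    sed -i -E -e "s#cagip/kotary:v[0-9.]+#$kotary_image#g" \
              -e "s#Always#Never#g" artifacts/deployment.yml
    kubectl apply -f artifacts/crd.yml
    ./e2e/e2e.sh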
3 changes: 2 additions & 1 deletion .gitignore
@@ -22,4 +22,5 @@ vendor/
 *.njsproj
 *.sln
 coverage.html
-coverage.out
+coverage.out
+temp.json
8 changes: 4 additions & 4 deletions e2e/KindConfig/kind-cluster-1.24.yaml
@@ -2,10 +2,10 @@ kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
 - role: control-plane
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"
 - role: worker
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"
 - role: worker
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"
 - role: worker
-  image: "kindest/node:v1.25.11"
+  image: "kindest/node:v1.24.11"
11 changes: 11 additions & 0 deletions e2e/KindConfig/kind-cluster-1.25.yaml
@@ -0,0 +1,11 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  image: "kindest/node:v1.25.0"
+- role: worker
+  image: "kindest/node:v1.25.0"
+- role: worker
+  image: "kindest/node:v1.25.0"
+- role: worker
+  image: "kindest/node:v1.25.0"
41 changes: 19 additions & 22 deletions e2e/e2e.sh
@@ -2,13 +2,11 @@
 
 #
 # A FEW INFORMATIONS ABOUT THIS SCRIPT
-#
-# This script is used to test the kotary operator, it replicates what a really
-# simple a basic usage of kotary should look like.
-# This script is not an exaustive list of test of the operator, it is closer to
-# an end to end test because each test depends of the previous one and fails one the first error encountered.
-#
-# This test should be used to verify that every basic features of kotary is working fine.
+#
+# This script is used to test the kotary operator, it replicates basic usage of kotary.
+# It sole purpose is to test end to end. (Not unit/perf/...) testing.
+# Sadly, this is a single scenario, and all the steps are necessary.
+#
 # It is not meant to be used to debug a particular part of the operator.
 #
 # HOW THIS SCRIPT WORKS:
@@ -17,10 +15,10 @@
 # (if you are using kind don't forget to load you image in the cluster)
 # Then it goes into a series of tests described in the code below.
 # If any error occurs or on any unwanted behavior, the script ends and starts the CleanUp function
-# to remoove what have been used during the test.
+# to remove what have been used during the test.
 # Note that you can uncomment some lines in the CleanUp function depending of your needs.
 # If everything goes as intended the script will exit with a code 0 and cleanup the evironment.
-#
+#
 # /!\ This script is in no way perfect, feel free to add new tests at the end of the script if you
 # believe that the script needs some more coverage.
 #
@@ -57,7 +55,7 @@ CleanUp () {
   rm temp.json
 }
 
-trap CleanUp EXIT ERR
+#trap CleanUp EXIT ERR
 
 echo -e "${BLUE}====== Starting SetUp ======${NC} \\n"
 
@@ -75,9 +73,9 @@ while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null
 #This is the test part
 echo -e "\\n${BLUE}====== Starting Tests ======${NC}\\n"
 
-#Trying to apply a rqc and verify that the claim is accepted (an accepted claim is deleted from the queue so it does not return anything)
+#Trying to apply a rqc and verify that the claim is accepted (an accepted claim is deleted from the queue so it does not return anything)
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
-kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
+kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
 phase=$(jq ' .items[].status.phase' temp.json) #get the status of the claim if the claim has been accepted $phase will be empty
 if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
 then echo -e "\\n${RED}FAILLED! error durring Claim test: the Claim is $phase. Should be accepted ${NC}" && exit 1 ; fi
@@ -92,23 +90,23 @@ kubectl apply -f $ROOT/e2e/KindConfig/pod3.yml -n $NS
 kubectl apply -f $ROOT/e2e/KindConfig/pod4.yml -n $NS
 echo -e "\\n ${PURPLE}Should be 'cpu: 500m/660m, memory: 1000Mi/1Gi'${NC}"
 if ! kubectl get resourcequota -n $NS | grep "cpu: 500m/660m, memory: 1000Mi/1Gi";
-then echo -e "\\n${RED}FAILLED! Error, the expected specs are not the same as the actual ones.${NC}" && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! Error, the expected specs are not the same as the actual ones.${NC}" && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"
 
 # Verify that trying to add a pod with resources exceeding what is left to use results in an error
 echo -e "\\n ${PURPLE}-- Trying to add a pod over max ressources (must be forbidden) --${NC}" && sleep 3
 if kubectl apply -f $ROOT/e2e/KindConfig/pod5.yml -n $NS ; # if the command does NOT result in an error then the test fails
-then echo -e "\\n${RED}FAILLED! error durring Pod test: The pod must not be accepted because it uses more ressources than what's left to use.${NC}" && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during Pod test: The pod must not be accepted because it uses more ressources than what's left to use.${NC}" && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"
 
 # Apply a new quotaclaim to scale up the resourses
 # verify that the claim is accepted (nothing should appear in the 'status' field)
 echo -e "\\n ${PURPLE}-- Scale UP --${NC}"
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimUp.yaml -n $NS && sleep 3 #apply the new rqc
-kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
+kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
 phase=$(jq ' .items[].status.phase' temp.json) #get the status of the claim if the claim has been accepted $phase will be empty
 if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
-then echo -e "\\n${RED}FAILLED! error durring Scale UP: the Claim is $phase ${NC}\\n" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during Scale UP: the Claim is $phase ${NC}\\n" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"
 
 # Apply a new quotaclaim to scale up the resourses but this claim is to big,
@@ -119,7 +117,7 @@ kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimToBig.yaml -n $NS && sleep 3
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "\"REJECTED\"" ]; #The claim MUST be rejected, else it is an error
-then echo -e "\\n${RED}FAILLED! error durring Scale UP(to big): the Claim has not been rejected${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during Scale UP (to big): the Claim has not been rejected${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n" && sleep 3
 
 # Apply a new quotaclaim to scale down the resourses,
@@ -130,19 +128,18 @@ kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimPending.yaml -n $NS && sleep
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "\"PENDING\"" ]; #The claim MUST be pending, else it is an error
-then echo -e "\\n${RED}FAILLED! error durring pending test: the Claim is not set to PENDING${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+then echo -e "\\n${RED}FAILED! error during pending test: the Claim is not set to PENDING${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"
 
 # Reduce the current usage of cpu and memory by deleting a pod
-echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}" && sleep 3
+echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}"
 kubectl delete pod -n $NS podtest-4 && sleep 3
 
 # assert that, after deletion of the pod, the 'pending' claim is now accepted
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "" ]; #The status must be empty because the claim should now be accepted. (remember: empty=accepted)
-then echo -e "\\n${RED}FAILLED! error durring pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1; fi
+then echo -e "\\n${RED}FAILED! error during pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1; fi
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
 echo -e "${GREEN} -- OK --${NC}\\n"
 
-echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"
+echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"
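A convention worth keeping in mind when reading the tests above: an accepted claim is deleted from kotary's queue, so an empty .items[].status.phase in the jq output means success, while REJECTED and PENDING are explicit phases. A minimal standalone probe in the same style (the $NS and $QUOTACLAIM values are hypothetical here; e2e.sh defines its own):

    #!/usr/bin/env bash
    NS=e2e-tests                      # hypothetical test namespace
    QUOTACLAIM=resourcequotaclaims    # hypothetical resource name
    kubectl get "$QUOTACLAIM" -n "$NS" -o=json > temp.json
    phase=$(jq '.items[].status.phase' temp.json)   # empty when no claim is queued
    case "$phase" in
      "")           echo "claim accepted" ;;
      '"REJECTED"') echo "claim rejected" >&2; exit 1 ;;
      '"PENDING"')  echo "claim pending until resources are freed" ;;
    esac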
