From 39f0dd3d73d4a14f1d47f3ad98b25747ef7bceb8 Mon Sep 17 00:00:00 2001
From: Jean-Philippe Evrard
Date: Thu, 29 Feb 2024 15:16:55 +0100
Subject: [PATCH] Add fixes to CI

Without this, the 1.24 job is deploying 1.25 nodes, and the CI would
actually fail.
---
 .github/workflows/e2eCI.yaml          | 28 ++++++++----------
 .gitignore                            |  3 +-
 e2e/KindConfig/kind-cluster-1.24.yaml |  8 +++---
 e2e/KindConfig/kind-cluster-1.25.yaml | 11 +++++++
 e2e/e2e.sh                            | 41 +++++++++++++--------------
 5 files changed, 48 insertions(+), 43 deletions(-)
 create mode 100644 e2e/KindConfig/kind-cluster-1.25.yaml

diff --git a/.github/workflows/e2eCI.yaml b/.github/workflows/e2eCI.yaml
index 0357b45..ce3a508 100644
--- a/.github/workflows/e2eCI.yaml
+++ b/.github/workflows/e2eCI.yaml
@@ -12,6 +12,7 @@ jobs:
           - 1.21
           - 1.23
           - 1.24
+          - 1.25
           - 1.26
     steps:
       - name: checkout
@@ -21,11 +22,12 @@
         uses: actions/setup-go@v4
         with:
           go-version: '1.20'
-
+
+      - name: Export build version
+        run: echo "kotary_image=kotary:${{ github.sha }}" >> "$GITHUB_ENV"
+
       - name: Docker Build image
-        run: |
-          docker build -t "ci/kotary:${{ matrix.kubernetes }}" .
-          docker image ls | grep ci
+        run: docker build -t "$kotary_image" .

       - name: Create cluster KinD
         uses: helm/kind-action@v1.5.0
@@ -33,24 +35,18 @@
           config: e2e/KindConfig/kind-cluster-${{ matrix.kubernetes }}.yaml

       - name: testing cluster kinD
-        run: |
-          kubectl cluster-info --context kind-chart-testing echo " current-context:" $(kubectl config current-context)
-          kubectl get all --all-namespaces
-
+        run: kubectl get pods --all-namespaces
+
       - name: Load docker image into kind cluster
-        run: kind load docker-image "ci/kotary:${{ matrix.kubernetes }}" --name chart-testing
-
-      - name: Set GOROOT
-        run: echo "export GOROOT=/opt/hostedtoolcache/go/1.20/x64" >> $GITHUB_ENV
-
+        run: kind load docker-image "$kotary_image" --name chart-testing
+
       - name: Deploy CRD
         run: kubectl apply -f artifacts/crd.yml

       - name: Edit kotary deployement
         run: |
-          version=${{ matrix.kubernetes }}
-          sed -i -E -e "s/cagip\/kotary:v[0-9.]+/ci\/kotary:$version/g" artifacts/deployment.yml -e "s/Always/Never/g" artifacts/deployment.yml;
+          sed -i -E -e "s#cagip/kotary:v[0-9.]+#$kotary_image#g" artifacts/deployment.yml -e "s#Always#Never#g" artifacts/deployment.yml;
           cat artifacts/deployment.yml

       - name: run tests
-        run: ./e2e/e2e.sh
\ No newline at end of file
+        run: ./e2e/e2e.sh
diff --git a/.gitignore b/.gitignore
index 3c50198..47c6d53 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,4 +22,5 @@ vendor/
 *.njsproj
 *.sln
 coverage.html
-coverage.out
\ No newline at end of file
+coverage.out
+temp.json
diff --git a/e2e/KindConfig/kind-cluster-1.24.yaml b/e2e/KindConfig/kind-cluster-1.24.yaml
index bacf0f0..212d13c 100644
--- a/e2e/KindConfig/kind-cluster-1.24.yaml
+++ b/e2e/KindConfig/kind-cluster-1.24.yaml
@@ -2,10 +2,10 @@ kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
   - role: control-plane
-    image: "kindest/node:v1.25.11"
+    image: "kindest/node:v1.24.11"
   - role: worker
-    image: "kindest/node:v1.25.11"
+    image: "kindest/node:v1.24.11"
   - role: worker
-    image: "kindest/node:v1.25.11"
+    image: "kindest/node:v1.24.11"
   - role: worker
-    image: "kindest/node:v1.25.11"
+    image: "kindest/node:v1.24.11"
diff --git a/e2e/KindConfig/kind-cluster-1.25.yaml b/e2e/KindConfig/kind-cluster-1.25.yaml
new file mode 100644
index 0000000..da0fca9
--- /dev/null
+++ b/e2e/KindConfig/kind-cluster-1.25.yaml
@@ -0,0 +1,11 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+  - role: control-plane
+    image: "kindest/node:v1.25.0"
+  - role: worker
+    image: "kindest/node:v1.25.0"
+  - role: worker
+    image: "kindest/node:v1.25.0"
+  - role: worker
+    image: "kindest/node:v1.25.0"
diff --git a/e2e/e2e.sh b/e2e/e2e.sh
index 8b2f3f9..75bdb62 100755
--- a/e2e/e2e.sh
+++ b/e2e/e2e.sh
@@ -2,13 +2,11 @@
 #
 # A FEW INFORMATIONS ABOUT THIS SCRIPT
-#
-# This script is used to test the kotary operator, it replicates what a really
-# simple a basic usage of kotary should look like.
-# This script is not an exaustive list of test of the operator, it is closer to
-# an end to end test because each test depends of the previous one and fails one the first error encountered.
-#
-# This test should be used to verify that every basic features of kotary is working fine.
+#
+# This script is used to test the kotary operator; it replicates basic usage of kotary.
+# Its sole purpose is end-to-end testing, not unit/perf/... testing.
+# Sadly, this is a single scenario, and all the steps are necessary.
+#
 # It is not meant to be used to debug a particular part of the operator.
 #
 # HOW THIS SCRIPT WORKS:
 #
@@ -17,10 +15,10 @@
 # (if you are using kind don't forget to load you image in the cluster)
 # Then it goes into a series of tests described in the code below.
 # If any error occurs or on any unwanted behavior, the script ends and starts the CleanUp function
-# to remoove what have been used during the test.
+# to remove what has been used during the test.
 # Note that you can uncomment some lines in the CleanUp function depending of your needs.
 # If everything goes as intended the script will exit with a code 0 and cleanup the evironment.
-#
+#
 # /!\ This script is in no way perfect, feel free to add new tests at the end of the script if you
 # believe that the script needs some more coverage.
 #
@@ -57,7 +55,7 @@ CleanUp () {
     rm temp.json
 }

-trap CleanUp EXIT ERR
+#trap CleanUp EXIT ERR

 echo -e "${BLUE}====== Starting SetUp ======${NC} \\n"

@@ -75,9 +73,9 @@ while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null
 #This is the test part
 echo -e "\\n${BLUE}====== Starting Tests ======${NC}\\n"

 #Trying to apply a rqc and verify that the claim is accepted (an accepted claim is deleted from the queue so it does not return anything)
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
 phase=$(jq ' .items[].status.phase' temp.json) #get the status of the claim if the claim has been accepted $phase will be empty
 if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
     then echo -e "\\n${RED}FAILLED! error durring Claim test: the Claim is $phase. Should be accepted ${NC}" && exit 1 ; fi
@@ -92,23 +90,23 @@ kubectl apply -f $ROOT/e2e/KindConfig/pod3.yml -n $NS
 kubectl apply -f $ROOT/e2e/KindConfig/pod4.yml -n $NS
 echo -e "\\n ${PURPLE}Should be 'cpu: 500m/660m, memory: 1000Mi/1Gi'${NC}"
 if ! kubectl get resourcequota -n $NS | grep "cpu: 500m/660m, memory: 1000Mi/1Gi";
-    then echo -e "\\n${RED}FAILLED! Error, the expected specs are not the same as the actual ones.${NC}" && exit 1 ; fi
+    then echo -e "\\n${RED}FAILED! Error, the expected specs are not the same as the actual ones.${NC}" && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Verify that trying to add a pod with resources exceeding what is left to use results in an error
 echo -e "\\n ${PURPLE}-- Trying to add a pod over max ressources (must be forbidden) --${NC}" && sleep 3
 if kubectl apply -f $ROOT/e2e/KindConfig/pod5.yml -n $NS ; # if the command does NOT result in an error then the test fails
-    then echo -e "\\n${RED}FAILLED! error durring Pod test: The pod must not be accepted because it uses more ressources than what's left to use.${NC}" && exit 1 ; fi
+    then echo -e "\\n${RED}FAILED! error during Pod test: The pod must not be accepted because it uses more resources than what's left to use.${NC}" && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Apply a new quotaclaim to scale up the resourses
 # verify that the claim is accepted (nothing should appear in the 'status' field)
 echo -e "\\n ${PURPLE}-- Scale UP --${NC}"
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimUp.yaml -n $NS && sleep 3 #apply the new rqc
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json #get the claim
 phase=$(jq ' .items[].status.phase' temp.json) #get the status of the claim if the claim has been accepted $phase will be empty
 if [ "$phase" != "" ]; #if the phase isn't empty, then it is an error
-    then echo -e "\\n${RED}FAILLED! error durring Scale UP: the Claim is $phase ${NC}\\n" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+    then echo -e "\\n${RED}FAILED! error during Scale UP: the Claim is $phase ${NC}\\n" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Apply a new quotaclaim to scale up the resourses but this claim is to big,
@@ -119,7 +117,7 @@ kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimToBig.yaml -n $NS && sleep 3
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "\"REJECTED\"" ]; #The claim MUST be rejected, else it is an error
-    then echo -e "\\n${RED}FAILLED! error durring Scale UP(to big): the Claim has not been rejected${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+    then echo -e "\\n${RED}FAILED! error during Scale UP (too big): the Claim has not been rejected${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n" && sleep 3

 # Apply a new quotaclaim to scale down the resourses,
@@ -130,19 +128,18 @@ kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimPending.yaml -n $NS && sleep 3
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "\"PENDING\"" ]; #The claim MUST be pending, else it is an error
-    then echo -e "\\n${RED}FAILLED! error durring pending test: the Claim is not set to PENDING${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
+    then echo -e "\\n${RED}FAILED! error during pending test: the Claim is not set to PENDING${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1 ; fi
 echo -e "${GREEN} -- OK --${NC}\\n"

 # Reduce the current usage of cpu and memory by deleting a pod
-echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}" && sleep 3
+echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}"
 kubectl delete pod -n $NS podtest-4 && sleep 3
-
 # assert that, after deletion of the pod, the 'pending' claim is now accepted
 kubectl get $QUOTACLAIM -n $NS -o=json > temp.json
 phase=$(jq ' .items[].status.phase' temp.json)
 if [ "$phase" != "" ]; #The status must be empty because the claim should now be accepted. (remember: empty=accepted)
-    then echo -e "\\n${RED}FAILLED! error durring pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1; fi
+    then echo -e "\\n${RED}FAILED! error during pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get $QUOTACLAIM -n $NS && exit 1; fi
 kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS && sleep 3
 echo -e "${GREEN} -- OK --${NC}\\n"

 echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"
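
--
Not part of the patch: a minimal sketch of how to run the same e2e flow locally,
mirroring the workflow steps for one matrix entry. It assumes the helm/kind-action
default cluster name "chart-testing" (the workflow does not set cluster_name) and
uses "kotary:dev" as an arbitrary local image tag; adjust both to your setup.

  # build the operator image with a local tag (kotary:dev is an arbitrary choice)
  docker build -t kotary:dev .
  # create a 1.25 cluster from the config added by this patch
  kind create cluster --name chart-testing --config e2e/KindConfig/kind-cluster-1.25.yaml
  # load the local image into the kind cluster
  kind load docker-image kotary:dev --name chart-testing
  # install the CRD and point the deployment at the local image, as the workflow does
  kubectl apply -f artifacts/crd.yml
  sed -i -E -e "s#cagip/kotary:v[0-9.]+#kotary:dev#g" -e "s#Always#Never#g" artifacts/deployment.yml
  # run the end-to-end scenario
  ./e2e/e2e.sh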