diff --git a/.github/workflows/test-suite.yaml b/.github/workflows/test-suite.yaml
index 1478e1969a..155890ade9 100644
--- a/.github/workflows/test-suite.yaml
+++ b/.github/workflows/test-suite.yaml
@@ -109,13 +109,6 @@ jobs:
     - name: On Failure, Dump Server Logs
       if: ${{ failure() }}
       run: cat ./tests/integration/${{ matrix.itest }}/r2log.txt
-    - name: On Failure, Launch Debug Session
-      uses: dereknola/action-upterm@v1.1
-      if: ${{ failure() }}
-      with:
-        ## If no one connects after 5 minutes, shut down server.
-        wait-timeout-minutes: 5
-        limit-access-to-actor: true
 
   e2e:
     name: "E2E Tests"
@@ -125,7 +118,7 @@ jobs:
     strategy:
       fail-fast: false
      matrix:
-        etest: [dnscache]
+        etest: [dnscache, kine]
       max-parallel: 3
     steps:
     - name: "Checkout"
@@ -168,11 +161,4 @@ jobs:
     - name: Run ${{ matrix.etest }} Test
       run: |
         cd tests/e2e/${{ matrix.etest }}
-        go test -v -timeout=45m ./${{ matrix.etest}}_test.go -ci -local
-    - name: On Failure, Launch Debug Session
-      uses: dereknola/action-upterm@v1.1
-      if: ${{ failure() }}
-      with:
-        ## If no one connects after 5 minutes, shut down server.
-        wait-timeout-minutes: 5
-        limit-access-to-actor: true
\ No newline at end of file
+        go test -v -timeout=45m ./${{ matrix.etest}}_test.go -ci -local
\ No newline at end of file
diff --git a/tests/e2e/kine/Vagrantfile b/tests/e2e/kine/Vagrantfile
new file mode 100644
index 0000000000..b099c4b725
--- /dev/null
+++ b/tests/e2e/kine/Vagrantfile
@@ -0,0 +1,157 @@
+ENV['VAGRANT_NO_PARALLEL'] = ENV['E2E_STANDUP_PARALLEL'] ? nil : 'no'
+NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
+  ["server-0", "server-1", "server-2", "agent-0"])
+NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
+  ['bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04'])
+GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
+RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+HARDENED = (ENV['E2E_HARDENED'] || "")
+EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "mysql")
+NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
+NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 3072).to_i
+CNI = (ENV['E2E_CNI'] || "canal") # canal, cilium and calico supported
+REGISTRY = (ENV['E2E_REGISTRY'] || "")
+# Virtualbox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks,
+# see https://www.virtualbox.org/manual/ch06.html#network_hostonly
+NETWORK_PREFIX = "10.10.10"
+install_type = ""
+
+def provision(vm, role, role_num, node_num)
+  vm.box = NODE_BOXES[node_num]
+  vm.hostname = "#{role[0]}-#{role_num}"
+  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
+  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
+  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"
+
+  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
+  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
+  load vagrant_defaults
+
+  defaultOSConfigure(vm)
+  db_type = getDBType(role, role_num, vm)
+
+  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
+
+  if !HARDENED.empty?
+    cisPrep(vm)
+  end
+
+  if role.include?("server") && role_num == 0
+    vm.provision :rke2, run: 'once' do |rke2|
+      rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}]
+      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
+      rke2.config = <<~YAML
+        write-kubeconfig-mode: '0644'
+        node-external-ip: #{NETWORK_PREFIX}.100
+        node-ip: #{NETWORK_PREFIX}.100
+        token: vagrant-rke2
+        cni: #{CNI}
+        #{db_type}
+      YAML
+    end
+  elsif role.include?("server") && role_num != 0
+    vm.provision :rke2, run: 'once' do |rke2|
+      rke2.env = %W[INSTALL_RKE2_TYPE=server #{install_type}]
+      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
+      rke2.config = <<~YAML
+        write-kubeconfig-mode: '0644'
+        node-external-ip: #{node_ip}
+        node-ip: #{node_ip}
+        server: https://#{NETWORK_PREFIX}.100:9345
+        token: vagrant-rke2
+        cni: #{CNI}
+        #{db_type}
+      YAML
+    end
+  end
+
+  if role.include?("agent")
+    vm.provision :rke2, run: 'once' do |rke2|
+      rke2.env = %W[INSTALL_RKE2_TYPE=agent #{install_type}]
+      rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
+      rke2.install_path = false
+      rke2.config = <<~YAML
+        write-kubeconfig-mode: '0644'
+        node-external-ip: #{node_ip}
+        node-ip: #{node_ip}
+        server: https://#{NETWORK_PREFIX}.100:9345
+        token: vagrant-rke2
+      YAML
+    end
+  end
+end
+
+def getDBType(role, role_num, vm)
+  if EXTERNAL_DB == "mariadb"
+    if role.include?("server") && role_num == 0
+      dockerInstall(vm)
+      vm.provision "shell", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MARIADB_ROOT_PASSWORD=e2e mariadb:11"
+      vm.provision "shell", inline: "echo \"Wait for mariaDB to startup\"; sleep 10"
+      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
+    elsif role.include?("server") && role_num != 0
+      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
+    end
+
+  elsif EXTERNAL_DB == "mysql"
+    if role.include?("server") && role_num == 0
+      dockerInstall(vm)
+      vm.provision "shell", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MYSQL_ROOT_PASSWORD=e2e mysql:5.7"
+      vm.provision "shell", inline: "echo \"Wait for mysql to startup\"; sleep 10"
+      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
+    elsif role.include?("server") && role_num != 0
+      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/rke2'"
+    end
+
+  elsif EXTERNAL_DB == "postgres"
+    if role.include?("server") && role_num == 0
+      dockerInstall(vm)
+      vm.provision "shell", type: "shell", inline: "docker run -d -p 5432:5432 --name postgres -e POSTGRES_PASSWORD=e2e postgres:14-alpine"
+      vm.provision "shell", inline: "echo \"Wait for postgres to startup\"; sleep 10"
+      return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/rke2?sslmode=disable'"
+    elsif role.include?("server") && role_num != 0
+      return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/rke2?sslmode=disable'"
+    end
+
+  elsif EXTERNAL_DB == "sqlite"
+    if role.include?("server") && role_num == 0
+      return "--disable-etcd: true"
+    end
+  elsif ( EXTERNAL_DB == "none" )
+    if role.include?("server") && role_num == 0
+      # Will use etcd
+    end
+  else
+    puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB
+    abort
+  end
+  return ""
+end
+
+
+Vagrant.configure("2") do |config|
+  config.vagrant.plugins = ["vagrant-rke2", "vagrant-reload"]
+  # Default provider is libvirt, virtualbox is only provided as a backup
+  config.vm.provider "libvirt" do |v|
+    v.cpus = NODE_CPUS
+    v.memory = NODE_MEMORY
+  end
+  config.vm.provider "virtualbox" do |v|
+    v.cpus = NODE_CPUS
+    v.memory = NODE_MEMORY
+  end
+
+  if NODE_ROLES.kind_of?(String)
+    NODE_ROLES = NODE_ROLES.split(" ", -1)
+  end
+  if NODE_BOXES.kind_of?(String)
+    NODE_BOXES = NODE_BOXES.split(" ", -1)
+  end
+
+  NODE_ROLES.each_with_index do |name, i|
+    config.vm.define name do |node|
+      roles = name.split("-", -1)
+      role_num = roles.pop.to_i
+      provision(node.vm, roles, role_num, i)
+    end
+  end
+end
diff --git a/tests/e2e/kine/kine_test.go b/tests/e2e/kine/kine_test.go
new file mode 100644
index 0000000000..5c9df9b98d
--- /dev/null
+++ b/tests/e2e/kine/kine_test.go
@@ -0,0 +1,244 @@
+package kine
+
+import (
+    "flag"
+    "fmt"
+    "os"
+    "strings"
+    "testing"
+
+    . "github.com/onsi/ginkgo/v2"
+    . "github.com/onsi/gomega"
+    "github.com/rancher/rke2/tests/e2e"
+)
+
+// Valid nodeOS: bento/ubuntu-24.04, opensuse/Leap-15.6.x86_64
+var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system")
+var serverCount = flag.Int("serverCount", 1, "number of server nodes")
+var agentCount = flag.Int("agentCount", 0, "number of agent nodes")
+var ci = flag.Bool("ci", false, "running on CI")
+var local = flag.Bool("local", false, "deploy a locally built RKE2")
+
+// Environment Variables Info:
+// E2E_CNI=(canal|cilium|calico)
+// E2E_RELEASE_VERSION=v1.23.1+rke2r1 or nil for latest commit from master
+// E2E_EXTERNAL_DB=(mariadb|mysql|postgres|sqlite|none)
+
+func Test_E2EKineValidation(t *testing.T) {
+    flag.Parse()
+    RegisterFailHandler(Fail)
+    suiteConfig, reporterConfig := GinkgoConfiguration()
+    RunSpecs(t, "Kine Test Suite", suiteConfig, reporterConfig)
+}
+
+var (
+    kubeConfigFile  string
+    serverNodeNames []string
+    agentNodeNames  []string
+)
+var _ = ReportAfterEach(e2e.GenReport)
+
+var _ = Describe("Verify Basic Cluster Creation with Kine", Ordered, func() {
+    It("Starts up kine with no issues", func() {
+        var err error
+        if *local {
+            serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+        } else {
+            serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+        }
+        Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
+        fmt.Println("CLUSTER CONFIG")
+        fmt.Println("OS:", *nodeOS)
+        fmt.Println("Server Nodes:", serverNodeNames)
+        fmt.Println("Agent Nodes:", agentNodeNames)
+        kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
+        Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Node Status", func() {
+        Eventually(func(g Gomega) {
+            nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+            g.Expect(err).NotTo(HaveOccurred())
+            for _, node := range nodes {
+                g.Expect(node.Status).Should(Equal("Ready"))
+            }
+        }, "420s", "5s").Should(Succeed())
+        _, err := e2e.ParseNodes(kubeConfigFile, true)
+        Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Checks Pod Status", func() {
+        Eventually(func(g Gomega) {
+            pods, err := e2e.ParsePods(kubeConfigFile, false)
+            g.Expect(err).NotTo(HaveOccurred())
+            for _, pod := range pods {
+                if strings.Contains(pod.Name, "helm-install") {
+                    g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
+                } else {
+                    g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+                }
+            }
+        }, "420s", "5s").Should(Succeed())
+        _, err := e2e.ParsePods(kubeConfigFile, true)
+        Expect(err).NotTo(HaveOccurred())
+    })
+
+    It("Verifies ClusterIP Service", func() {
+        _, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+        Eventually(func() (string, error) {
+            cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+            return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-clusterip"))
+
+        clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
+        cmd := "curl -L --insecure http://" + clusterip + "/name.html"
+        for _, nodeName := range serverNodeNames {
+            Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
+        }
+    })
+    It("Verifies NodePort Service", func() {
+        _, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+        for _, nodeName := range serverNodeNames {
+            nodeExternalIP, err := e2e.FetchNodeExternalIP(nodeName)
+            Expect(err).NotTo(HaveOccurred())
+            cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
+            nodeport, err := e2e.RunCommand(cmd)
+            Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
+            cmd = "curl -L --insecure http://" + nodeExternalIP + ":" + nodeport + "/name.html"
+            Eventually(func() (string, error) {
+                return e2e.RunCommand(cmd)
+            }, "5s", "1s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
+            cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+            Eventually(func() (string, error) {
+                return e2e.RunCommand(cmd)
+            }, "120s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
+        }
+    })
+
+    It("Verifies LoadBalancer Service", func() {
+        _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+        ip, err := e2e.FetchNodeExternalIP(serverNodeNames[0])
+        Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
+        cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
+        port, err := e2e.RunCommand(cmd)
+        Expect(err).NotTo(HaveOccurred())
+
+        cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+        Eventually(func() (string, error) {
+            return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
+
+        cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
+        Eventually(func() (string, error) {
+            return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
+    })
+
+    It("Verifies Ingress", func() {
+        _, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+        for _, nodeName := range serverNodeNames {
+            ip, _ := e2e.FetchNodeExternalIP(nodeName)
+            cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
+            Eventually(func() (string, error) {
+                return e2e.RunCommand(cmd)
+            }, "240s", "5s").Should(ContainSubstring("test-ingress"))
+        }
+    })
+
+    It("Verifies Daemonset", func() {
+        _, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+        nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+        Expect(err).NotTo(HaveOccurred())
+
+        Eventually(func(g Gomega) {
+            pods, err := e2e.ParsePods(kubeConfigFile, false)
+            g.Expect(err).NotTo(HaveOccurred())
+            count := e2e.CountOfStringInSlice("test-daemonset", pods)
+            g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
+        }, "240s", "10s").Should(Succeed())
+    })
+
+    It("Verifies dns access", func() {
+        _, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+        cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
+        Eventually(func() (string, error) {
+            return e2e.RunCommand(cmd)
+        }, "120s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
+    })
+
+    It("Verify Local Path Provisioner storage", func() {
+        _, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+
+        Eventually(func() (string, error) {
+            cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
+            return e2e.RunCommand(cmd)
+        }, "120s", "2s").Should(MatchRegexp(`local-path-pvc.+Bound`))
+
+        Eventually(func() (string, error) {
+            cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
+            return e2e.RunCommand(cmd)
+        }, "420s", "2s").Should(MatchRegexp(`volume-test.+Running`))
+
+        cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
+        _, err = e2e.RunCommand(cmd)
+        Expect(err).NotTo(HaveOccurred())
+
+        cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
+        _, err = e2e.RunCommand(cmd)
+        Expect(err).NotTo(HaveOccurred())
+
+        _, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile)
+        Expect(err).NotTo(HaveOccurred())
+
+        Eventually(func() (string, error) {
+            cmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- cat /data/test"
+            return e2e.RunCommand(cmd)
+        }, "180s", "2s").Should(ContainSubstring("local-path-test"))
+    })
+
+    Context("Validate restart", func() {
+        It("Restarts normally", func() {
+            errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
+            Expect(errRestart).NotTo(HaveOccurred(), "Nodes did not restart correctly")
+
+            Eventually(func(g Gomega) {
+                nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+                g.Expect(err).NotTo(HaveOccurred())
+                for _, node := range nodes {
+                    g.Expect(node.Status).Should(Equal("Ready"))
+                }
+                pods, _ := e2e.ParsePods(kubeConfigFile, false)
+                count := e2e.CountOfStringInSlice("test-daemonset", pods)
+                g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
+                podsRunningAr := 0
+                for _, pod := range pods {
+                    if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
+                        podsRunningAr++
+                    }
+                }
+                g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart")
+            }, "1120s", "5s").Should(Succeed())
+        })
+    })
+})
+
+var failed bool
+var _ = AfterEach(func() {
+    failed = failed || CurrentSpecReport().Failed()
+})
+
+var _ = AfterSuite(func() {
+    if failed && !*ci {
+        fmt.Println("FAILED!")
+    } else {
+        Expect(e2e.DestroyCluster()).To(Succeed())
+        Expect(os.Remove(kubeConfigFile)).To(Succeed())
+    }
+})
diff --git a/tests/e2e/scripts/run_tests.sh b/tests/e2e/scripts/run_tests.sh
index 8de196d1bb..72f4544e43 100755
--- a/tests/e2e/scripts/run_tests.sh
+++ b/tests/e2e/scripts/run_tests.sh
@@ -12,7 +12,7 @@ set -x
 
 # tests to run
-tests=("ciliumnokp" "dnscache" "dualstack" "mixedos" "mixedosbgp" "multus" "secretsencryption" "splitserver" "upgradecluster" "validatecluster")
+tests=("ciliumnokp" "dnscache" "dualstack" "mixedos" "mixedosbgp" "multus" "secretsencryption" "splitserver" "upgradecluster" "validatecluster" "kine")
 nodeOS=${1:-"bento/ubuntu-24.04"}
 OS=$(echo "$nodeOS"|cut -d'/' -f2)
diff --git a/tests/e2e/vagrantdefaults.rb b/tests/e2e/vagrantdefaults.rb
index 9f64788edd..eb761fd8c6 100644
--- a/tests/e2e/vagrantdefaults.rb
+++ b/tests/e2e/vagrantdefaults.rb
@@ -36,3 +36,25 @@ def loadManifests(vm, files)
   end
   vm.provision "Deploy additional manifests", type: "shell", inline: "mv /tmp/manifests /var/lib/rancher/rke2/server/manifests"
 end
+
+def dockerInstall(vm)
+  vm.provider "libvirt" do |v|
+    v.memory = NODE_MEMORY + 1024
+  end
+  vm.provider "virtualbox" do |v|
+    v.memory = NODE_MEMORY + 1024
+  end
+  box = vm.box.to_s
+  if box.include?("ubuntu")
+    vm.provision "shell", inline: "apt update; apt install -y docker.io"
+  elsif box.include?("Leap")
+    vm.provision "shell", inline: "zypper install -y docker apparmor-parser"
+  elsif box.include?("microos")
+    vm.provision "shell", inline: "transactional-update pkg install -y docker apparmor-parser"
+    vm.provision 'docker-reload', type: 'reload', run: 'once'
+    vm.provision "shell", inline: "systemctl enable --now docker"
+  elsif box.include?("rocky")
+    vm.provision "shell", inline: "dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo"
+    vm.provision "shell", inline: "dnf install -y docker-ce"
+  end
+end
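
For running the new suite outside of CI, a minimal sketch of the local invocation (not part of the change): it assumes the vagrant-rke2/vagrant-reload plugins and a libvirt or VirtualBox provider are available on the host, uses the E2E_EXTERNAL_DB and E2E_CNI variables read by the new Vagrantfile, and passes the same -ci -local flags the workflow uses, where -local deploys a locally built RKE2 (drop it to install from the configured release version or branch instead).

    # Same command the workflow runs, against the Vagrantfile's default MySQL-backed kine datastore.
    cd tests/e2e/kine
    E2E_EXTERNAL_DB=mysql E2E_CNI=canal go test -v -timeout=45m ./kine_test.go -ci -local

    # Switch the external datastore via the same variable: mariadb, postgres, sqlite, or none (embedded etcd).
    E2E_EXTERNAL_DB=postgres go test -v -timeout=45m ./kine_test.go -ci -local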