Skip to content

Commit

Permalink
Add local binary support to split server test, add to drone CI
Browse files Browse the repository at this point in the history
Signed-off-by: Derek Nola <derek.nola@suse.com>
  • Loading branch information
dereknola committed Mar 21, 2024
1 parent d21c3e1 commit db77713
Show file tree
Hide file tree
Showing 3 changed files with 83 additions and 19 deletions.
24 changes: 16 additions & 8 deletions .drone.yml
Original file line number Diff line number Diff line change
Expand Up @@ -636,14 +636,21 @@ steps:
done
fi
- docker run -d -p 5000:5000 -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io --name registry registry:2
- cd tests/e2e/validatecluster
- vagrant destroy -f
- go test -v -timeout=45m ./validatecluster_test.go -ci -local
- cp ./coverage.out /tmp/artifacts/validate-coverage.out
- cd ../secretsencryption
- vagrant destroy -f
- go test -v -timeout=30m ./secretsencryption_test.go -ci -local
- cp ./coverage.out /tmp/artifacts/se-coverage.out
- |
cd tests/e2e/validatecluster
vagrant destroy -f
go test -v -timeout=45m ./validatecluster_test.go -ci -local
cp ./coverage.out /tmp/artifacts/validate-coverage.out
- |
cd ../secretsencryption
vagrant destroy -f
go test -v -timeout=30m ./secretsencryption_test.go -ci -local
cp ./coverage.out /tmp/artifacts/se-coverage.out
- |
cd ../splitserver
vagrant destroy -f
go test -v -timeout=30m ./splitserver_test.go -ci -local
cp ./coverage.out /tmp/artifacts/split-coverage.out
- |
if [ "$DRONE_BUILD_EVENT" = "pull_request" ]; then
cd ../upgradecluster
Expand All @@ -669,6 +676,7 @@ steps:
files:
- /tmp/artifacts/validate-coverage.out
- /tmp/artifacts/se-coverage.out
- /tmp/artifacts/split-coverage.out
- /tmp/artifacts/upgrade-coverage.out
flags:
- e2etests
Expand Down
10 changes: 6 additions & 4 deletions tests/e2e/splitserver/Vagrantfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-etcd-0", "server-etcd-1", "server-etcd-2", "server-cp-0", "server-cp-1", "agent-0"])
["server-etcd-0", "server-etcd-1", "server-etcd-2", "server-cp-0", "server-cp-1", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
Expand All @@ -24,10 +24,11 @@ def provision(vm, role, role_num, node_num)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

vm.provision "ping k3s.io", type: "shell", inline: "ping -c 2 k3s.io"

if node_num == 0 && !role.include?("server") && !role.include?("etcd")
puts "first node must be a etcd server"
abort
if ARGV.include?("up") || (ARGV.include?("reload") && ARGV.include?("--provision"))
puts "Error: first node provisioned must be a etcd server"
abort
end
elsif role.include?("server") && role.include?("etcd") && role_num == 0
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = "server"
Expand Down Expand Up @@ -118,6 +119,7 @@ Vagrant.configure("2") do |config|
end

NODE_ROLES.each_with_index do |role, i|
# Use the trailing number in the role name (e.g. "server-cp-1" -> 1) as the role index
role_num = role.split("-", -1).pop.to_i
config.vm.define role do |node|
provision(node.vm, role, role_num, i)
Expand Down
68 changes: 61 additions & 7 deletions tests/e2e/splitserver/splitserver_test.go
Original file line number Diff line number Diff line change
@@ -1,16 +1,19 @@
package splitserver

import (
"context"
"flag"
"fmt"
"os"
"strconv"
"strings"
"testing"
"time"

"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"golang.org/x/sync/errgroup"
)

// Valid nodeOS: generic/ubuntu2204, opensuse/Leap-15.3.x86_64
Expand All @@ -19,21 +22,25 @@ var etcdCount = flag.Int("etcdCount", 3, "number of server nodes only deploying
var controlPlaneCount = flag.Int("controlPlaneCount", 1, "number of server nodes acting as control plane")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
var hardened = flag.Bool("hardened", false, "true or false")

// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master

func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount int) ([]string, []string, []string, error) {
// createSplitCluster creates a split server cluster with the given nodeOS, etcdCount, controlPlaneCount, and agentCount.
// It duplicates and merges functionality found in the e2e.CreateCluster and e2e.CreateLocalCluster functions.
func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount int, local bool) ([]string, []string, []string, error) {
etcdNodeNames := make([]string, etcdCount)
cpNodeNames := make([]string, controlPlaneCount)
agentNodeNames := make([]string, agentCount)

for i := 0; i < etcdCount; i++ {
etcdNodeNames[i] = "server-etcd-" + strconv.Itoa(i)
}
cpNodeNames := make([]string, controlPlaneCount)
for i := 0; i < controlPlaneCount; i++ {
cpNodeNames[i] = "server-cp-" + strconv.Itoa(i)
}
agentNodeNames := make([]string, agentCount)
for i := 0; i < agentCount; i++ {
agentNodeNames[i] = "agent-" + strconv.Itoa(i)
}
Expand All @@ -43,21 +50,68 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount
nodeBoxes := strings.Repeat(nodeOS+" ", etcdCount+controlPlaneCount+agentCount)
nodeBoxes = strings.TrimSpace(nodeBoxes)

allNodes := append(etcdNodeNames, cpNodeNames...)
allNodes = append(allNodes, agentNodeNames...)

var testOptions string
for _, env := range os.Environ() {
if strings.HasPrefix(env, "E2E_") {
testOptions += " " + env
}
}

cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" %s vagrant up &> vagrant.log`, nodeRoles, nodeBoxes, testOptions)
// Provision the first etcd node. In GitHub Actions, this also imports the VM image into libvirt, which
// takes time and can cause the next vagrant up to fail if it is not given enough time to complete.
cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &> vagrant.log`, nodeRoles, nodeBoxes, etcdNodeNames[0])
fmt.Println(cmd)
if _, err := e2e.RunCommand(cmd); err != nil {
fmt.Println("Error Creating Cluster", err)
return nil, nil, nil, err
return etcdNodeNames, cpNodeNames, agentNodeNames, err
}

// Bring up the rest of the nodes in parallel
errg, _ := errgroup.WithContext(context.Background())
for _, node := range allNodes[1:] {
cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, node)
errg.Go(func() error {
_, err := e2e.RunCommand(cmd)
return err
})
// libVirt/Virtualbox needs some time between provisioning nodes
time.Sleep(10 * time.Second)
}
if err := errg.Wait(); err != nil {
return etcdNodeNames, cpNodeNames, agentNodeNames, err
}

if local {
testOptions += " E2E_RELEASE_VERSION=skip"
for _, node := range allNodes {
cmd := fmt.Sprintf(`E2E_NODE_ROLES=%s vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node, node)
if _, err := e2e.RunCommand(cmd); err != nil {
return etcdNodeNames, cpNodeNames, agentNodeNames, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
}
if _, err := e2e.RunCmdOnNode("mv /tmp/k3s /usr/local/bin/", node); err != nil {
return etcdNodeNames, cpNodeNames, agentNodeNames, err
}
}
}
// Install K3s on all nodes in parallel
errg, _ = errgroup.WithContext(context.Background())
for _, node := range allNodes {
cmd = fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" %s vagrant provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, testOptions, node)
errg.Go(func() error {
_, err := e2e.RunCommand(cmd)
return err
})
// K3s needs some time between joining nodes to avoid learner issues
time.Sleep(10 * time.Second)
}
if err := errg.Wait(); err != nil {
return etcdNodeNames, cpNodeNames, agentNodeNames, err
}
return etcdNodeNames, cpNodeNames, agentNodeNames, nil
}

func Test_E2ESplitServer(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
Expand All @@ -78,7 +132,7 @@ var _ = Describe("Verify Create", Ordered, func() {
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
etcdNodeNames, cpNodeNames, agentNodeNames, err = createSplitCluster(*nodeOS, *etcdCount, *controlPlaneCount, *agentCount)
etcdNodeNames, cpNodeNames, agentNodeNames, err = createSplitCluster(*nodeOS, *etcdCount, *controlPlaneCount, *agentCount, *local)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
Expand Down

0 comments on commit db77713

Please sign in to comment.