From 8ada430ccacb782fdd99e3928e47e3e6b197d386 Mon Sep 17 00:00:00 2001
From: Kirsten Garrison
Date: Mon, 20 Jul 2020 17:42:07 -0700
Subject: [PATCH 1/5] test: move e2e helper functions to utils_test.go

---
 test/e2e/mcd_test.go   | 82 ----------------------------------------
 test/e2e/utils_test.go | 85 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 85 insertions(+), 82 deletions(-)

diff --git a/test/e2e/mcd_test.go b/test/e2e/mcd_test.go
index 2316fea521..fcf69092d5 100644
--- a/test/e2e/mcd_test.go
+++ b/test/e2e/mcd_test.go
@@ -12,7 +12,6 @@ import (
     "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/uuid"
@@ -49,67 +48,6 @@ func TestMCDToken(t *testing.T) {
     }
 }
 
-func mcLabelForRole(role string) map[string]string {
-    mcLabels := make(map[string]string)
-    mcLabels[mcfgv1.MachineConfigRoleLabelKey] = role
-    return mcLabels
-}
-
-func mcLabelForWorkers() map[string]string {
-    return mcLabelForRole("worker")
-}
-
-// TODO consider also testing for Ign2
-// func createIgn2File(path, content, fs string, mode int) ign2types.File {
-//     return ign2types.File{
-//         FileEmbedded1: ign2types.FileEmbedded1{
-//             Contents: ign2types.FileContents{
-//                 Source: content,
-//             },
-//             Mode: &mode,
-//         },
-//         Node: ign2types.Node{
-//             Filesystem: fs,
-//             Path: path,
-//             User: &ign2types.NodeUser{
-//                 Name: "root",
-//             },
-//         },
-//     }
-// }
-
-func createIgn3File(path, content string, mode int) ign3types.File {
-    return ign3types.File{
-        FileEmbedded1: ign3types.FileEmbedded1{
-            Contents: ign3types.Resource{
-                Source: &content,
-            },
-            Mode: &mode,
-        },
-        Node: ign3types.Node{
-            Path: path,
-            User: ign3types.NodeUser{
-                Name: helpers.StrToPtr("root"),
-            },
-        },
-    }
-}
-
-func createMCToAddFileForRole(name, role, filename, data string) *mcfgv1.MachineConfig {
-    mcadd := createMC(fmt.Sprintf("%s-%s", name, uuid.NewUUID()), role)
-
-    ignConfig := ctrlcommon.NewIgnConfig()
-    ignFile := createIgn3File(filename, "data:,"+data, 420)
-    ignConfig.Storage.Files = append(ignConfig.Storage.Files, ignFile)
-    rawIgnConfig := helpers.MarshalOrDie(ignConfig)
-    mcadd.Spec.Config.Raw = rawIgnConfig
-    return mcadd
-}
-
-func createMCToAddFile(name, filename, data string) *mcfgv1.MachineConfig {
-    return createMCToAddFileForRole(name, "worker", filename, data)
-}
-
 func TestMCDeployed(t *testing.T) {
     cs := framework.NewClientSet("")
 
@@ -139,26 +77,6 @@ func TestMCDeployed(t *testing.T) {
     }
 }
 
-func mcdForNode(cs *framework.ClientSet, node *corev1.Node) (*corev1.Pod, error) {
-    // find the MCD pod that has spec.nodeNAME = node.Name and get its name:
-    listOptions := metav1.ListOptions{
-        FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String(),
-    }
-    listOptions.LabelSelector = labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}).String()
-
-    mcdList, err := cs.Pods("openshift-machine-config-operator").List(context.TODO(), listOptions)
-    if err != nil {
-        return nil, err
-    }
-    if len(mcdList.Items) != 1 {
-        if len(mcdList.Items) == 0 {
-            return nil, fmt.Errorf("Failed to find MCD for node %s", node.Name)
-        }
-        return nil, fmt.Errorf("Too many (%d) MCDs for node %s", len(mcdList.Items), node.Name)
-    }
-    return &mcdList.Items[0], nil
-}
-
 func TestUpdateSSH(t *testing.T) {
     cs := framework.NewClientSet("")
 
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index fcee703b1d..fa934c7a54 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -8,14 +8,18 @@ import (
     "testing"
     "time"
 
+    ign3types "github.com/coreos/ignition/v2/config/v3_1/types"
     mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+    ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
     "github.com/openshift/machine-config-operator/test/e2e/framework"
     "github.com/openshift/machine-config-operator/test/helpers"
     "github.com/pkg/errors"
     "github.com/stretchr/testify/require"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
 )
 
@@ -176,3 +180,84 @@ func execCmdOnNode(t *testing.T, cs *framework.ClientSet, node corev1.Node, args
     require.Nil(t, err, "failed to exec cmd %v on node %s: %s", args, node.Name, string(b))
     return string(b)
 }
+
+func mcLabelForRole(role string) map[string]string {
+    mcLabels := make(map[string]string)
+    mcLabels[mcfgv1.MachineConfigRoleLabelKey] = role
+    return mcLabels
+}
+
+func mcLabelForWorkers() map[string]string {
+    return mcLabelForRole("worker")
+}
+
+// TODO consider also testing for Ign2
+// func createIgn2File(path, content, fs string, mode int) ign2types.File {
+//     return ign2types.File{
+//         FileEmbedded1: ign2types.FileEmbedded1{
+//             Contents: ign2types.FileContents{
+//                 Source: content,
+//             },
+//             Mode: &mode,
+//         },
+//         Node: ign2types.Node{
+//             Filesystem: fs,
+//             Path: path,
+//             User: &ign2types.NodeUser{
+//                 Name: "root",
+//             },
+//         },
+//     }
+// }
+
+func createIgn3File(path, content string, mode int) ign3types.File {
+    return ign3types.File{
+        FileEmbedded1: ign3types.FileEmbedded1{
+            Contents: ign3types.Resource{
+                Source: &content,
+            },
+            Mode: &mode,
+        },
+        Node: ign3types.Node{
+            Path: path,
+            User: ign3types.NodeUser{
+                Name: helpers.StrToPtr("root"),
+            },
+        },
+    }
+}
+
+func createMCToAddFileForRole(name, role, filename, data string) *mcfgv1.MachineConfig {
+    mcadd := createMC(fmt.Sprintf("%s-%s", name, uuid.NewUUID()), role)
+
+    ignConfig := ctrlcommon.NewIgnConfig()
+    ignFile := createIgn3File(filename, "data:,"+data, 420)
+    ignConfig.Storage.Files = append(ignConfig.Storage.Files, ignFile)
+    rawIgnConfig := helpers.MarshalOrDie(ignConfig)
+    mcadd.Spec.Config.Raw = rawIgnConfig
+    return mcadd
+}
+
+func createMCToAddFile(name, filename, data string) *mcfgv1.MachineConfig {
+    return createMCToAddFileForRole(name, "worker", filename, data)
+}
+
+func mcdForNode(cs *framework.ClientSet, node *corev1.Node) (*corev1.Pod, error) {
+    // find the MCD pod that has spec.nodeNAME = node.Name and get its name:
+    listOptions := metav1.ListOptions{
+        FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String(),
+    }
+    listOptions.LabelSelector = labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}).String()
+
+    mcdList, err := cs.Pods("openshift-machine-config-operator").List(context.TODO(), listOptions)
+    if err != nil {
+        return nil, err
+    }
+    if len(mcdList.Items) != 1 {
+        if len(mcdList.Items) == 0 {
+            return nil, fmt.Errorf("Failed to find MCD for node %s", node.Name)
+        }
+        return nil, fmt.Errorf("Too many (%d) MCDs for node %s", len(mcdList.Items), node.Name)
+    }
+    return &mcdList.Items[0], nil
+}

From 0fffc08b2566773fb0b6c1eff889ca2cedc913de Mon Sep 17 00:00:00 2001
From: Kirsten Garrison
Date: Mon, 20 Jul 2020 17:42:23 -0700
Subject: [PATCH 2/5] e2e: remove TestUpdateSSH

SSHKeys functionality is tested in TestIgn3Cfg.
---
 test/e2e/mcd_test.go | 48 --------------------------------------
 1 file changed, 48 deletions(-)

diff --git a/test/e2e/mcd_test.go b/test/e2e/mcd_test.go
index fcf69092d5..82d6b7af82 100644
--- a/test/e2e/mcd_test.go
+++ b/test/e2e/mcd_test.go
@@ -77,54 +77,6 @@ func TestMCDeployed(t *testing.T) {
     }
 }
 
-func TestUpdateSSH(t *testing.T) {
-    cs := framework.NewClientSet("")
-
-    // create a dummy MC with an sshKey for user Core
-    mcName := fmt.Sprintf("00-sshkeys-worker-%s", uuid.NewUUID())
-    mcadd := &mcfgv1.MachineConfig{}
-    mcadd.ObjectMeta = metav1.ObjectMeta{
-        Name:   mcName,
-        Labels: mcLabelForWorkers(),
-    }
-    // create a new MC that adds a valid user & ssh keys
-    tempUser := ign3types.PasswdUser{
-        Name: "core",
-        SSHAuthorizedKeys: []ign3types.SSHAuthorizedKey{
-            "1234_test",
-            "abc_test",
-        },
-    }
-    ignConfig := ctrlcommon.NewIgnConfig()
-    ignConfig.Passwd.Users = append(ignConfig.Passwd.Users, tempUser)
-    rawIgnConfig := helpers.MarshalOrDie(ignConfig)
-    mcadd.Spec.Config.Raw = rawIgnConfig
-
-    _, err := cs.MachineConfigs().Create(context.TODO(), mcadd, metav1.CreateOptions{})
-    require.Nil(t, err, "failed to create MC")
-    t.Logf("Created %s", mcadd.Name)
-
-    // grab the latest worker- MC
-    renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcadd.Name)
-    require.Nil(t, err)
-    err = waitForPoolComplete(t, cs, "worker", renderedConfig)
-    require.Nil(t, err)
-    nodes, err := getNodesByRole(cs, "worker")
-    require.Nil(t, err)
-    for _, node := range nodes {
-        assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig)
-        assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone)
-
-        // now rsh into that daemon and grep the authorized key file to check if 1234_test was written
-        // must do both commands in same shell, combine commands into one exec.Command()
-        found := execCmdOnNode(t, cs, node, "grep", "1234_test", "/rootfs/home/core/.ssh/authorized_keys")
-        if !strings.Contains(found, "1234_test") {
-            t.Fatalf("updated ssh keys not found in authorized_keys, got %s", found)
-        }
-        t.Logf("Node %s has SSH key", node.Name)
-    }
-}
-
 func TestKernelArguments(t *testing.T) {
     cs := framework.NewClientSet("")
     kargsMC := &mcfgv1.MachineConfig{

From 792be66f588a0d90fdde7611e8f2cefcde9be5d9 Mon Sep 17 00:00:00 2001
From: Kirsten Garrison
Date: Mon, 20 Jul 2020 17:42:44 -0700
Subject: [PATCH 3/5] Remove TestCustomPool and use custom pools in
 TestIgn3Cfg, TestKernelType & TestDontDeleteRPMFiles

This serves the same purpose as the deleted test and speeds up e2e by
running on a custom pool instead of all workers.

Also update utils_test.go to account for custom pools.
---
 test/e2e/mcd_test.go   | 201 +++++++++++++++++++++++------------------
 test/e2e/utils_test.go |   2 +-
 2 files changed, 113 insertions(+), 90 deletions(-)

diff --git a/test/e2e/mcd_test.go b/test/e2e/mcd_test.go
index 82d6b7af82..9b6d68d7fa 100644
--- a/test/e2e/mcd_test.go
+++ b/test/e2e/mcd_test.go
@@ -118,15 +118,24 @@ func TestKernelArguments(t *testing.T) {
 func TestKernelType(t *testing.T) {
     cs := framework.NewClientSet("")
-    // Get initial MachineConfig used by the worker pool so that we can rollback to it later on
-    mcp, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{})
+
+    // Create infra pool to roll out MC changes
+    unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra")
"node-role.kubernetes.io/infra") + createMCP(t, cs, "infra") + + // create old mc to have something to verify we successfully rolled back + oldInfraConfig := createMC("old-infra", "infra") + _, err := cs.MachineConfigs().Create(context.TODO(), oldInfraConfig, metav1.CreateOptions{}) + require.Nil(t, err) + oldInfraRenderedConfig, err := waitForRenderedConfig(t, cs, "infra", oldInfraConfig.Name) + err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) - workerOldMC := mcp.Status.Configuration.Name + // create kernel type MC and roll out kernelType := &mcfgv1.MachineConfig{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("kerneltype-%s", uuid.NewUUID()), - Labels: mcLabelForWorkers(), + Labels: mcLabelForRole("infra"), }, Spec: mcfgv1.MachineConfigSpec{ Config: runtime.RawExtension{ @@ -139,23 +148,21 @@ func TestKernelType(t *testing.T) { _, err = cs.MachineConfigs().Create(context.TODO(), kernelType, metav1.CreateOptions{}) require.Nil(t, err) t.Logf("Created %s", kernelType.Name) - renderedConfig, err := waitForRenderedConfig(t, cs, "worker", kernelType.Name) + renderedConfig, err := waitForRenderedConfig(t, cs, "infra", kernelType.Name) require.Nil(t, err) - if err := waitForPoolComplete(t, cs, "worker", renderedConfig); err != nil { + if err := waitForPoolComplete(t, cs, "infra", renderedConfig); err != nil { t.Fatal(err) } - nodes, err := getNodesByRole(cs, "worker") - require.Nil(t, err) - for _, node := range nodes { - assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) - assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) + infraNode := getSingleNodeByRole(t, cs, "infra") - kernelInfo := execCmdOnNode(t, cs, node, "uname", "-a") - if !strings.Contains(kernelInfo, "PREEMPT RT") { - t.Fatalf("Node %s doesn't have expected kernel", node.Name) - } - t.Logf("Node %s has expected kernel", node.Name) + assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) + assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) + + kernelInfo := execCmdOnNode(t, cs, infraNode, "uname", "-a") + if !strings.Contains(kernelInfo, "PREEMPT RT") { + t.Fatalf("Node %s doesn't have expected kernel", infraNode.Name) } + t.Logf("Node %s has expected kernel", infraNode.Name) // Delete the applied kerneltype MachineConfig to make sure rollback works fine if err := cs.MachineConfigs().Delete(context.TODO(), kernelType.Name, metav1.DeleteOptions{}); err != nil { @@ -165,22 +172,37 @@ func TestKernelType(t *testing.T) { t.Logf("Deleted MachineConfig %s", kernelType.Name) // Wait for the mcp to rollback to previous config - if err := waitForPoolComplete(t, cs, "worker", workerOldMC); err != nil { + if err := waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig); err != nil { t.Fatal(err) } - // Re-fetch the worker nodes for updated annotations - nodes, err = getNodesByRole(cs, "worker") + // Re-fetch the infra node for updated annotations + infraNode = getSingleNodeByRole(t, cs, "infra") + + assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], oldInfraRenderedConfig) + assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) + kernelInfo = execCmdOnNode(t, cs, infraNode, "uname", "-a") + if strings.Contains(kernelInfo, "PREEMPT RT") { + t.Fatalf("Node %s 
did not rollback successfully", infraNode.Name) + } + t.Logf("Node %s has successfully rolled back", infraNode.Name) + + unlabelFunc() + + workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) require.Nil(t, err) - for _, node := range nodes { - assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], workerOldMC) - assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - kernelInfo := execCmdOnNode(t, cs, node, "uname", "-a") - if strings.Contains(kernelInfo, "PREEMPT RT") { - t.Fatalf("Node %s did not rollback successfully", node.Name) + if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { + node, err := cs.Nodes().Get(context.TODO(), infraNode.Name, metav1.GetOptions{}) + require.Nil(t, err) + if node.Annotations[constants.DesiredMachineConfigAnnotationKey] != workerMCP.Spec.Configuration.Name { + return false, nil } - t.Logf("Node %s has successfully rolled back", node.Name) + return true, nil + }); err != nil { + t.Errorf("infra node hasn't moved back to worker config: %v", err) } + err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) + require.Nil(t, err) } @@ -332,12 +354,24 @@ func TestReconcileAfterBadMC(t *testing.T) { } } +// Test that deleting a MC that changes a file does not completely delete the file +// entirely but rather restores it to its original state. func TestDontDeleteRPMFiles(t *testing.T) { cs := framework.NewClientSet("") - mcHostFile := createMCToAddFile("modify-host-file", "/etc/motd", "mco-test") + unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra") + //createMCP(t, cs, "infra") - workerOldMc := getMcName(t, cs, "worker") + // create old mc to have something to verify we successfully rolled back + // oldInfraConfig := createMC("old-infra-rpm", "infra") + // _, err := cs.MachineConfigs().Create(context.TODO(), oldInfraConfig, metav1.CreateOptions{}) + // require.Nil(t, err) + // oldInfraRenderedConfig, err := waitForRenderedConfig(t, cs, "infra", oldInfraConfig.Name) + // err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) + // require.Nil(t, err) + oldInfraRenderedConfig := getMcName(t, cs, "infra") + + mcHostFile := createMCToAddFileForRole("modify-host-file", "infra", "/etc/motd", "mco-test") // create the dummy MC now _, err := cs.MachineConfigs().Create(context.TODO(), mcHostFile, metav1.CreateOptions{}) @@ -345,13 +379,10 @@ func TestDontDeleteRPMFiles(t *testing.T) { t.Errorf("failed to create machine config %v", err) } - renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcHostFile.Name) + renderedConfig, err := waitForRenderedConfig(t, cs, "infra", mcHostFile.Name) + require.Nil(t, err) + err = waitForPoolComplete(t, cs, "infra", renderedConfig) require.Nil(t, err) - - // wait for the mcp to go back to previous config - if err := waitForPoolComplete(t, cs, "worker", renderedConfig); err != nil { - t.Fatal(err) - } // now delete the bad MC and watch the nodes reconciling as expected if err := cs.MachineConfigs().Delete(context.TODO(), mcHostFile.Name, metav1.DeleteOptions{}); err != nil { @@ -359,48 +390,18 @@ func TestDontDeleteRPMFiles(t *testing.T) { } // wait for the mcp to go back to previous config - if err := waitForPoolComplete(t, cs, "worker", workerOldMc); err != nil { - t.Fatal(err) - } - - nodes, err := getNodesByRole(cs, "worker") - require.Nil(t, err) - - for _, node := range nodes { - assert.Equal(t, 
-        assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone)
-
-        found := execCmdOnNode(t, cs, node, "cat", "/rootfs/etc/motd")
-        if strings.Contains(found, "mco-test") {
-            t.Fatalf("updated file doesn't contain expected data, got %s", found)
-        }
-    }
-}
-
-func TestCustomPool(t *testing.T) {
-    cs := framework.NewClientSet("")
-
-    unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra")
-
-    createMCP(t, cs, "infra")
-
-    infraMC := createMCToAddFileForRole("infra-host-file", "infra", "/etc/mco-custom-pool", "mco-custom-pool")
-    _, err := cs.MachineConfigs().Create(context.TODO(), infraMC, metav1.CreateOptions{})
-    require.Nil(t, err)
-    renderedConfig, err := waitForRenderedConfig(t, cs, "infra", infraMC.Name)
-    require.Nil(t, err)
-    err = waitForPoolComplete(t, cs, "infra", renderedConfig)
+    err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig)
     require.Nil(t, err)
 
     infraNode := getSingleNodeByRole(t, cs, "infra")
 
-    assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig)
+    assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], oldInfraRenderedConfig)
     assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone)
-    out := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/etc/mco-custom-pool")
-    if !strings.Contains(out, "mco-custom-pool") {
-        t.Fatalf("Unexpected infra MC content on node %s: %s", infraNode.Name, out)
+
+    found := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/etc/motd")
+    if strings.Contains(found, "mco-test") {
+        t.Fatalf("updated file doesn't contain expected data, got %s", found)
     }
-    t.Logf("Node %s has expected infra MC content", infraNode.Name)
 
     unlabelFunc()
@@ -416,19 +417,24 @@
     }); err != nil {
         t.Errorf("infra node hasn't moved back to worker config: %v", err)
     }
-    err = waitForPoolComplete(t, cs, "infra", renderedConfig)
+    err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig)
     require.Nil(t, err)
+
 }
 
 func TestIgn3Cfg(t *testing.T) {
     cs := framework.NewClientSet("")
+    unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra")
+
+    //createMCP(t, cs, "infra")
+
     // create a dummy MC with an sshKey for user Core
-    mcName := fmt.Sprintf("99-ign3cfg-worker-%s", uuid.NewUUID())
+    mcName := fmt.Sprintf("99-ign3cfg-infra-%s", uuid.NewUUID())
     mcadd := &mcfgv1.MachineConfig{}
     mcadd.ObjectMeta = metav1.ObjectMeta{
         Name:   mcName,
-        Labels: mcLabelForWorkers(),
+        Labels: mcLabelForRole("infra"),
     }
     // create a new MC that adds a valid user & ssh key
     testIgn3Config := ign3types.Config{}
@@ -448,26 +454,43 @@
     t.Logf("Created %s", mcadd.Name)
 
     // grab the latest worker- MC
-    renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcadd.Name)
-    require.Nil(t, err)
-    err = waitForPoolComplete(t, cs, "worker", renderedConfig)
+    renderedConfig, err := waitForRenderedConfig(t, cs, "infra", mcadd.Name)
     require.Nil(t, err)
-    nodes, err := getNodesByRole(cs, "worker")
+    err = waitForPoolComplete(t, cs, "infra", renderedConfig)
     require.Nil(t, err)
-    for _, node := range nodes {
-        assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig)
-        assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone)
-        foundSSH := execCmdOnNode(t, cs, node, "grep", "1234_test_ign3", "/rootfs/home/core/.ssh/authorized_keys")
-        if !strings.Contains(foundSSH, "1234_test_ign3") {
-            t.Fatalf("updated ssh keys not found in authorized_keys, got %s", foundSSH)
-        }
-        t.Logf("Node %s has SSH key", node.Name)
+    infraNode := getSingleNodeByRole(t, cs, "infra")
+
+    assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig)
+    assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone)
+
+    foundSSH := execCmdOnNode(t, cs, infraNode, "grep", "1234_test_ign3", "/rootfs/home/core/.ssh/authorized_keys")
+    if !strings.Contains(foundSSH, "1234_test_ign3") {
+        t.Fatalf("updated ssh keys not found in authorized_keys, got %s", foundSSH)
+    }
+    t.Logf("Node %s has SSH key", infraNode.Name)
+
+    foundFile := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/etc/testfileconfig")
+    if !strings.Contains(foundFile, "test-ign3-stuff") {
+        t.Fatalf("updated file doesn't contain expected data, got %s", foundFile)
+    }
+    t.Logf("Node %s has file", infraNode.Name)
+
+    unlabelFunc()
 
-        foundFile := execCmdOnNode(t, cs, node, "cat", "/rootfs/etc/testfileconfig")
-        if !strings.Contains(foundFile, "test-ign3-stuff") {
+    workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{})
+    require.Nil(t, err)
+    if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
+        node, err := cs.Nodes().Get(context.TODO(), infraNode.Name, metav1.GetOptions{})
+        require.Nil(t, err)
+        if node.Annotations[constants.DesiredMachineConfigAnnotationKey] != workerMCP.Spec.Configuration.Name {
+            return false, nil
         }
-        t.Logf("Node %s has file", node.Name)
+        return true, nil
+    }); err != nil {
+        t.Errorf("infra node hasn't moved back to worker config: %v", err)
     }
+    err = waitForPoolComplete(t, cs, "infra", renderedConfig)
+    require.Nil(t, err)
+
 }
 
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index fa934c7a54..791a03fee3 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -95,7 +95,7 @@ func labelRandomNodeFromPool(t *testing.T, cs *framework.ClientSet, pool, label
     rand.Seed(time.Now().UnixNano())
     infraNode := nodes[rand.Intn(len(nodes))]
 
-    out, err := exec.Command("oc", "label", "node", infraNode.Name, label+"=").CombinedOutput()
+    out, err := exec.Command("oc", "label", "node", infraNode.Name, label+"=", "--overwrite=true").CombinedOutput()
     require.Nil(t, err, "unable to label worker node %s with infra: %s", infraNode.Name, string(out))
     return func() {
         out, err = exec.Command("oc", "label", "node", infraNode.Name, label+"-").CombinedOutput()

From 68ed8539293e63fc98c7129b370a02e8917ae6bb Mon Sep 17 00:00:00 2001
From: Kirsten Garrison
Date: Mon, 20 Jul 2020 17:42:59 -0700
Subject: [PATCH 4/5] update TestKernelArguments to use custom pool & rollback

change " baz=test bar=\"hello world\"" to " baz=test bar=hello world" to
account for the rpm-ostree bug in deleting args; see:
https://github.com/coreos/rpm-ostree/issues/2159
---
 test/e2e/mcd_test.go | 83 ++++++++++++++++++++++++++++++--------------
 1 file changed, 56 insertions(+), 27 deletions(-)

diff --git a/test/e2e/mcd_test.go b/test/e2e/mcd_test.go
index 9b6d68d7fa..01d7af2b25 100644
--- a/test/e2e/mcd_test.go
+++ b/test/e2e/mcd_test.go
@@ -79,57 +79,86 @@ func TestMCDeployed(t *testing.T) {
 func TestKernelArguments(t *testing.T) {
cs := framework.NewClientSet("") + + // Create infra pool to roll out MC changes + unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra") + createMCP(t, cs, "infra") + + // create old mc to have something to verify we successfully rolled back + oldInfraConfig := createMC("old-infra", "infra") + _, err := cs.MachineConfigs().Create(context.TODO(), oldInfraConfig, metav1.CreateOptions{}) + require.Nil(t, err) + oldInfraRenderedConfig, err := waitForRenderedConfig(t, cs, "infra", oldInfraConfig.Name) + err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) + require.Nil(t, err) + + // create kargs MC kargsMC := &mcfgv1.MachineConfig{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("kargs-%s", uuid.NewUUID()), - Labels: mcLabelForWorkers(), + Labels: mcLabelForRole("infra"), }, Spec: mcfgv1.MachineConfigSpec{ Config: runtime.RawExtension{ Raw: helpers.MarshalOrDie(ctrlcommon.NewIgnConfig()), }, - KernelArguments: []string{"nosmt", "foo=bar", "foo=baz", " baz=test bar=\"hello world\""}, + KernelArguments: []string{"nosmt", "foo=bar", "foo=baz", " baz=test bar=hello world"}, }, } - _, err := cs.MachineConfigs().Create(context.TODO(), kargsMC, metav1.CreateOptions{}) + _, err = cs.MachineConfigs().Create(context.TODO(), kargsMC, metav1.CreateOptions{}) require.Nil(t, err) t.Logf("Created %s", kargsMC.Name) - renderedConfig, err := waitForRenderedConfig(t, cs, "worker", kargsMC.Name) + renderedConfig, err := waitForRenderedConfig(t, cs, "infra", kargsMC.Name) require.Nil(t, err) - if err := waitForPoolComplete(t, cs, "worker", renderedConfig); err != nil { - t.Fatal(err) + err = waitForPoolComplete(t, cs, "infra", renderedConfig) + require.Nil(t, err) + + // Re-fetch the infra node for updated annotations + infraNode := getSingleNodeByRole(t, cs, "infra") + assert.Equal(t, infraNode.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) + assert.Equal(t, infraNode.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) + kargs := execCmdOnNode(t, cs, infraNode, "cat", "/rootfs/proc/cmdline") + expectedKernelArgs := []string{"nosmt", "foo=bar", "foo=baz", "baz=test", "bar=hello world"} + for _, v := range expectedKernelArgs { + if !strings.Contains(kargs, v) { + t.Fatalf("Missing %q in kargs: %q", v, kargs) + } + } + t.Logf("Node %s has expected kargs", infraNode.Name) + + // cleanup - delete karg mc and rollback + if err := cs.MachineConfigs().Delete(context.TODO(), kargsMC.Name, metav1.DeleteOptions{}); err != nil { + t.Error(err) } - nodes, err := getNodesByRole(cs, "worker") + t.Logf("Deleted MachineConfig %s", kargsMC.Name) + err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig) require.Nil(t, err) - for _, node := range nodes { - assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig) - assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone) - kargs := execCmdOnNode(t, cs, node, "cat", "/rootfs/proc/cmdline") - expectedKernelArgs := []string{"nosmt", "foo=bar", "foo=baz", "baz=test", "\"bar=hello world\""} - for _, v := range expectedKernelArgs { - if !strings.Contains(kargs, v) { - t.Fatalf("Missing %q in kargs: %q", v, kargs) - } + + unlabelFunc() + + workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) + require.Nil(t, err) + if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { + node, err := 
+        require.Nil(t, err)
+        if node.Annotations[constants.DesiredMachineConfigAnnotationKey] != workerMCP.Spec.Configuration.Name {
+            return false, nil
         }
-        t.Logf("Node %s has expected kargs", node.Name)
+        return true, nil
+    }); err != nil {
+        t.Errorf("infra node hasn't moved back to worker config: %v", err)
     }
+    err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig)
+    require.Nil(t, err)
 }
 
 func TestKernelType(t *testing.T) {
     cs := framework.NewClientSet("")
-    // Create infra pool to roll out MC changes
     unlabelFunc := labelRandomNodeFromPool(t, cs, "worker", "node-role.kubernetes.io/infra")
-    createMCP(t, cs, "infra")
-    // create old mc to have something to verify we successfully rolled back
-    oldInfraConfig := createMC("old-infra", "infra")
-    _, err := cs.MachineConfigs().Create(context.TODO(), oldInfraConfig, metav1.CreateOptions{})
-    require.Nil(t, err)
-    oldInfraRenderedConfig, err := waitForRenderedConfig(t, cs, "infra", oldInfraConfig.Name)
-    err = waitForPoolComplete(t, cs, "infra", oldInfraRenderedConfig)
-    require.Nil(t, err)
+    oldInfraRenderedConfig := getMcName(t, cs, "infra")
 
     // create kernel type MC and roll out
     kernelType := &mcfgv1.MachineConfig{
@@ -145,7 +174,7 @@
         },
     }
 
-    _, err = cs.MachineConfigs().Create(context.TODO(), kernelType, metav1.CreateOptions{})
+    _, err := cs.MachineConfigs().Create(context.TODO(), kernelType, metav1.CreateOptions{})
     require.Nil(t, err)
     t.Logf("Created %s", kernelType.Name)
     renderedConfig, err := waitForRenderedConfig(t, cs, "infra", kernelType.Name)

From 266ce61d62a7cb049c434a3316c34caaa09039b9 Mon Sep 17 00:00:00 2001
From: Kirsten Garrison
Date: Mon, 20 Jul 2020 17:43:08 -0700
Subject: [PATCH 5/5] Makefile drop timeout to 75m

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 3b2c8865d7..9198f76915 100644
--- a/Makefile
+++ b/Makefile
@@ -109,4 +109,4 @@ Dockerfile.rhel7: Dockerfile Makefile
 
 # This was copied from https://github.com/openshift/cluster-image-registry-operator
 test-e2e:
-	go test -failfast -timeout 120m -v$${WHAT:+ -run="$$WHAT"} ./test/e2e/
+	go test -failfast -timeout 75m -v$${WHAT:+ -run="$$WHAT"} ./test/e2e/