diff --git a/charts/steadybit-extension-host/Chart.yaml b/charts/steadybit-extension-host/Chart.yaml index 61a09d0..e3ea30d 100644 --- a/charts/steadybit-extension-host/Chart.yaml +++ b/charts/steadybit-extension-host/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: steadybit-extension-host description: Steadybit host extension Helm chart for Kubernetes. -version: 1.1.26 +version: 1.1.27 appVersion: v1.2.24 home: https://www.steadybit.com/ icon: https://steadybit-website-assets.s3.amazonaws.com/logo-symbol-transparent.png diff --git a/charts/steadybit-extension-host/tests/__snapshot__/daemonset_test.yaml.snap b/charts/steadybit-extension-host/tests/__snapshot__/daemonset_test.yaml.snap index 6386044..ec784a7 100644 --- a/charts/steadybit-extension-host/tests/__snapshot__/daemonset_test.yaml.snap +++ b/charts/steadybit-extension-host/tests/__snapshot__/daemonset_test.yaml.snap @@ -188,7 +188,7 @@ manifest should match snapshot with TLS: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -311,7 +311,7 @@ manifest should match snapshot with appArmorProfile for k8s >= 1.30: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -430,7 +430,7 @@ manifest should match snapshot with different containerPorts: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -554,7 +554,7 @@ manifest should match snapshot with extra env vars: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -673,7 +673,7 @@ manifest should match snapshot with extra labels: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -796,7 +796,7 @@ manifest should match snapshot with mutual TLS: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -933,7 +933,7 @@ manifest should match snapshot with mutual TLS using containerPaths: resources: limits: cpu: 200m - 
memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -1050,7 +1050,7 @@ manifest should match snapshot with podSecurityContext: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -1169,7 +1169,7 @@ manifest should match snapshot with priority class: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -1287,7 +1287,7 @@ manifest should match snapshot with update strategy: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi @@ -1404,7 +1404,7 @@ manifest should match snapshot without TLS: resources: limits: cpu: 200m - memory: 32Mi + memory: 64Mi requests: cpu: 50m memory: 16Mi diff --git a/charts/steadybit-extension-host/values.yaml b/charts/steadybit-extension-host/values.yaml index d9950c7..7ab2771 100644 --- a/charts/steadybit-extension-host/values.yaml +++ b/charts/steadybit-extension-host/values.yaml @@ -67,7 +67,7 @@ resources: cpu: "50m" limits: # resources.limits.memory -- The limit of memory to be used - memory: "32Mi" + memory: "64Mi" # resources.limits.cpu -- The limit of cpu share to be used during its interval cpu: "200m" diff --git a/e2e/integration_test.go b/e2e/integration_test.go index 75189b8..688822a 100644 --- a/e2e/integration_test.go +++ b/e2e/integration_test.go @@ -163,7 +163,8 @@ func TestWithMinikube(t *testing.T) { } func testStressCpu(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { - log.Info().Msg("Starting testStressCpu") + cleanupSidecars(m, e) + config := struct { Duration int `json:"duration"` CpuLoad int `json:"cpuLoad"` @@ -178,6 +179,8 @@ func testStressCpu(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testStressMemory(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { + cleanupSidecars(m, e) + tests := []struct { name string failOnOomKill bool @@ -234,6 +237,8 @@ func testStressMemory(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testStressIo(t *testing.T, m 
*e2e.Minikube, e *e2e.Extension) { + cleanupSidecars(m, e) + err := m.SshExec("sudo", "mkdir", "-p", "/stressng").Run() require.NoError(t, err) @@ -372,7 +377,8 @@ func testShutdownHost(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testNetworkBlackhole(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { - log.Info().Msg("Starting testNetworkBlackhole") + cleanupSidecars(m, e) + nginx := e2e.Nginx{Minikube: m} err := nginx.Deploy("nginx-network-blackhole") require.NoError(t, err, "failed to create pod") @@ -450,7 +456,8 @@ func testNetworkBlackhole(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testNetworkDelay(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { - log.Info().Msg("Starting testNetworkDelay") + cleanupSidecars(m, e) + netperf := e2e.Netperf{Minikube: m} err := netperf.Deploy("delay") defer func() { _ = netperf.Delete() }() @@ -541,7 +548,8 @@ func testNetworkDelay(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testNetworkPackageLoss(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { - log.Info().Msg("Starting testNetworkPackageLoss") + cleanupSidecars(m, e) + iperf := e2e.Iperf{Minikube: m} err := iperf.Deploy("loss") defer func() { _ = iperf.Delete() }() @@ -611,7 +619,8 @@ func testNetworkPackageLoss(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testNetworkPackageCorruption(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { - log.Info().Msg("Starting testNetworkPackageCorruption") + cleanupSidecars(m, e) + iperf := e2e.Iperf{Minikube: m} err := iperf.Deploy("corruption") defer func() { _ = iperf.Delete() }() @@ -696,7 +705,9 @@ func testNetworkPackageCorruption(t *testing.T, m *e2e.Minikube, e *e2e.Extensio func testNetworkLimitBandwidth(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { t.Skip("Skipping testNetworkLimitBandwidth because it does not work on minikube, but was tested manually on a real cluster") - log.Info().Msg("Starting testNetworkLimitBandwidth") + + cleanupSidecars(m, e) + iperf := 
e2e.Iperf{Minikube: m} err := iperf.Deploy("bandwidth") defer func() { _ = iperf.Delete() }() @@ -770,7 +781,8 @@ func testNetworkLimitBandwidth(t *testing.T, m *e2e.Minikube, e *e2e.Extension) } func testNetworkBlockDns(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { - log.Info().Msg("Starting testNetworkBlockDns") + cleanupSidecars(m, e) + nginx := e2e.Nginx{Minikube: m} err := nginx.Deploy("nginx-network-block-dns") require.NoError(t, err, "failed to create pod") @@ -838,6 +850,8 @@ func testNetworkBlockDns(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testFillDisk(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { + cleanupSidecars(m, e) + pathToFill := "/filldisk" err := m.SshExec("sudo", "mkdir", "-p", pathToFill).Run() require.NoError(t, err) @@ -1002,6 +1016,8 @@ func testFillDisk(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { } func testStressCombined(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { + cleanupSidecars(m, e) + memConfig := struct { Duration int `json:"duration"` Percentage int `json:"percentage"` @@ -1089,6 +1105,8 @@ func testNetworkDelayAndBandwidthOnSameContainer(t *testing.T, m *e2e.Minikube, } func testFillMemory(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { + cleanupSidecars(m, e) + tests := []struct { name string failOnOomKill bool @@ -1154,13 +1172,19 @@ func runInMinikube(m *e2e.Minikube, arg ...string) ([]byte, error) { } func requireAllSidecarsCleanedUp(t *testing.T, m *e2e.Minikube, e *e2e.Extension) { - out, err := m.PodExec(e.Pod, "steadybit-extension-host", "ls", "/run/steadybit/runc") - if strings.Contains(out, "No such file or directory") { - return - } - require.NoError(t, err) - space := strings.TrimSpace(out) - require.Empty(t, space, "no sidecar directories must be present") + require.EventuallyWithT(t, func(t *assert.CollectT) { + out, err := m.PodExec(e.Pod, "steadybit-extension-host", "ls", "/run/steadybit/runc") + if strings.Contains(out, "No such file or directory") { + return + } + 
assert.NoError(t, err) + space := strings.TrimSpace(out) + assert.Empty(t, space, "no sidecar directories must be present") + }, 30*time.Second, 1*time.Second) +} + +func cleanupSidecars(m *e2e.Minikube, e *e2e.Extension) { + _, _ = m.PodExec(e.Pod, "steadybit-extension-host", "sh", "-c", "rm -rf /run/steadybit/runc/*") +} + +func getMinikubeOptions() e2e.MinikubeOpts {