diff --git a/Makefile b/Makefile
index 5a63fd6..4036c07 100644
--- a/Makefile
+++ b/Makefile
@@ -42,3 +42,6 @@ test-kind-simple:
 test-all: test kind-clear-clusters
 	@go test -v ./fixtures/kind/kind_test.go
 	@go test -v ./placement_test.go
+
+test-placement: clear-test-cache kind-clear-clusters
+	@go test -v ./placement_test.go
diff --git a/README.md b/README.md
index 7f5542a..554a472 100644
--- a/README.md
+++ b/README.md
@@ -112,10 +112,20 @@ All `gdt` test specs have the same [base fields][base-spec-fields]:
 * `description`: (optional) string with longer description of the test unit.
 * `timeout`: (optional) an object containing [timeout information][timeout] for the
   test unit.
-* `timeout.after`: a string duration of time the test unit is expected to
+* `timeout`: (optional) a string duration of time the test unit is expected to
   complete within.
-* `timeout.expected`: a bool indicating that the test unit is expected to not
-  complete before `timeout.after`. This is really only useful in unit testing.
+* `retry`: (optional) an object containing retry configuration for the test
+  unit. Some plugins will automatically attempt to retry the test action when
+  an assertion fails. This field allows you to control this retry behaviour for
+  each individual test.
+* `retry.interval`: (optional) a string duration of time that the test plugin
+  will retry the test action in the event assertions fail. The default interval
+  for retries is plugin-dependent.
+* `retry.attempts`: (optional) an integer indicating the number of times that a
+  plugin will retry the test action in the event assertions fail. The default
+  number of attempts for retries is plugin-dependent.
+* `retry.exponential`: (optional) a boolean indicating an exponential backoff
+  should be applied to the retry interval. The default is plugin-dependent.
 * `wait` (optional) an object containing [wait information][wait] for the test
   unit.
 * `wait.before`: a string duration of time that gdt should wait before
@@ -123,7 +133,6 @@ All `gdt` test specs have the same [base fields][base-spec-fields]:
 * `wait.after`: a string duration of time that gdt should wait after
   executing the test unit's action.
 
-[timeout]: https://github.com/gdt-dev/gdt/blob/2791e11105fd3c36d1f11a7d111e089be7cdc84c/types/timeout.go#L11-L22
 [wait]: https://github.com/gdt-dev/gdt/blob/2791e11105fd3c36d1f11a7d111e089be7cdc84c/types/wait.go#L11-L25
 
 `gdt-kube` test specs have some additional fields that allow you to take some
@@ -711,108 +720,6 @@ tests:
       delete: deployments/nginx
 ```
 
-### Timeouts and retrying `kube.get` assertions
-
-When evaluating assertions for `kube.get`, `gdt` inspects the test's
-`timeout.after` value to determine how long to retry the `get` call and recheck
-the assertions.
-
-If a test's `timeout.after` is empty, `gdt` inspects the scenario's
-`defaults.timeout.after` value. If both of those values are empty, `gdt` will
-use a **default timeout of 5 seconds**.
- -If you're interested in seeing the individual results of `gdt`'s -assertion-checks for a single `get` call, you can use the `gdt.WithDebug()` -function, like this test function demonstrates: - -file: `testdata/matches.yaml`: - -```yaml -name: matches -description: create a deployment and check the matches condition succeeds -fixtures: - - kind -tests: - - name: create-deployment - kube: - create: testdata/manifests/nginx-deployment.yaml - - name: deployment-exists - kube: - get: deployments/nginx - assert: - matches: - spec: - replicas: 2 - template: - metadata: - labels: - app: nginx - status: - readyReplicas: 2 - - name: delete-deployment - kube: - delete: deployments/nginx -``` - -file: `matches_test.go` - -```go -import ( - "github.com/gdt-dev/gdt" - _ "github.com/gdt-dev/kube" - kindfix "github.com/gdt-dev/kube/fixture/kind" -) - -func TestMatches(t *testing.T) { - fp := filepath.Join("testdata", "matches.yaml") - - kfix := kindfix.New() - - s, err := gdt.From(fp) - - ctx := gdt.NewContext(gdt.WithDebug()) - ctx = gdt.RegisterFixture(ctx, "kind", kfix) - s.Run(ctx, t) -} -``` - -Here's what running `go test -v matches_test.go` would look like: - -``` -$ go test -v matches_test.go -=== RUN TestMatches -=== RUN TestMatches/matches -=== RUN TestMatches/matches/create-deployment -=== RUN TestMatches/matches/deployment-exists -deployment-exists (try 1 after 1.303µs) ok: false, terminal: false -deployment-exists (try 1 after 1.303µs) failure: assertion failed: match field not equal: $.status.readyReplicas not present in subject -deployment-exists (try 2 after 595.62786ms) ok: false, terminal: false -deployment-exists (try 2 after 595.62786ms) failure: assertion failed: match field not equal: $.status.readyReplicas not present in subject -deployment-exists (try 3 after 1.020003807s) ok: false, terminal: false -deployment-exists (try 3 after 1.020003807s) failure: assertion failed: match field not equal: $.status.readyReplicas not present in subject -deployment-exists (try 4 after 1.760006109s) ok: false, terminal: false -deployment-exists (try 4 after 1.760006109s) failure: assertion failed: match field not equal: $.status.readyReplicas had different values. expected 2 but found 1 -deployment-exists (try 5 after 2.772416449s) ok: true, terminal: false -=== RUN TestMatches/matches/delete-deployment ---- PASS: TestMatches (3.32s) - --- PASS: TestMatches/matches (3.30s) - --- PASS: TestMatches/matches/create-deployment (0.01s) - --- PASS: TestMatches/matches/deployment-exists (2.78s) - --- PASS: TestMatches/matches/delete-deployment (0.02s) -PASS -ok command-line-arguments 3.683s -``` - -You can see from the debug output above that `gdt` created the Deployment and -then did a `kube.get` for the `deployments/nginx` Deployment. Initially -(attempt 1), the `assert.matches` assertion failed because the -`status.readyReplicas` field was not present in the returned resource. `gdt` -retried the `kube.get` call 4 more times (attempts 2-5), with attempts 2 and 3 -failed the existence check for the `status.readyReplicas` field and attempt 4 -failing the *value* check for the `status.readyReplicas` field being `1` -instead of the expected `2`. Finally, when the Deployment was completely rolled -out, attempt 5 succeeded in all the `assert.matches` assertions. 
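With timeout and retry handling now driven by gdt's top-level `Scenario.Run`, per-test overrides live in the base `timeout` and `retry` fields rather than in plugin-specific logic. The spec below is an illustrative sketch only: the field names come from the base-fields list documented earlier in this patch, while the resource, durations and attempt counts are invented.

```yaml
name: retry-example
description: sketch of per-test timeout and retry overrides (illustrative values)
fixtures:
  - kind
tests:
  - name: deployment-becomes-ready
    # Scalar duration form of the base `timeout` field.
    timeout: 30s
    # Override the plugin's default retry behaviour for this one test.
    retry:
      interval: 2s
      attempts: 10
      exponential: false
    kube:
      get: deployments/nginx
    assert:
      matches:
        status:
          readyReplicas: 2
```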
- ## Determining Kubernetes config, context and namespace values When evaluating how to construct a Kubernetes client `gdt-kube` uses the following @@ -881,12 +788,78 @@ tests: - kube.get: pods/nginx ``` +#### Retaining and deleting KinD clusters + +The default behaviour of the `KindFixture` is to delete the KinD cluster when +the Fixture's `Stop()` method is called, but **only if the KinD cluster did not +previously exist before the Fixture's `Start()` method was called**. + +If you want to *always* ensure that a KinD cluster is deleted when the +`KindFixture` is stopped, use the `fixtures.kind.WithDeleteOnStop()` function: + +```go +import ( + "github.com/gdt-dev/gdt" + gdtkube "github.com/gdt-dev/kube" + gdtkind "github.com/gdt-dev/kube/fixtures/kind" +) + +func TestExample(t *testing.T) { + s, err := gdt.From("path/to/test.yaml") + if err != nil { + t.Fatalf("failed to load tests: %s", err) + } + + ctx := context.Background() + ctx = gdt.RegisterFixture( + ctx, "kind", gdtkind.New(), + gdtkind.WithDeleteOnStop(), + ) + err = s.Run(ctx, t) + if err != nil { + t.Fatalf("failed to run tests: %s", err) + } +} +``` + +Likewise, the default behaviour of the `KindFixture` is to retain the KinD +cluster when the Fixture's `Stop()` method is called but **only if the KinD +cluster previously existed before the Fixture's `Start()` method was called**. + +If you want to *always* ensure a KinD cluster is retained, even if the +KindFixture created the KinD cluster, use the +`fixtures.kind.WithRetainOnStop()` function: + +```go +import ( + "github.com/gdt-dev/gdt" + gdtkube "github.com/gdt-dev/kube" + gdtkind "github.com/gdt-dev/kube/fixtures/kind" +) + +func TestExample(t *testing.T) { + s, err := gdt.From("path/to/test.yaml") + if err != nil { + t.Fatalf("failed to load tests: %s", err) + } + + ctx := context.Background() + ctx = gdt.RegisterFixture( + ctx, "kind", gdtkind.New(), + gdtkind.WithRetainOnStop(), + ) + err = s.Run(ctx, t) + if err != nil { + t.Fatalf("failed to run tests: %s", err) + } +} +``` + #### Passing a KinD configuration You may want to pass a custom KinD configuration resource by using the `fixtures.kind.WithConfigPath()` modifier: - ```go import ( "github.com/gdt-dev/gdt" @@ -914,15 +887,6 @@ func TestExample(t *testing.T) { } ``` -In your test file, you would list the "kind" fixture in the `fixtures` list: - -```yaml -name: example-using-kind -fixtures: - - kind -tests: - - kube.get: pods/nginx -``` ## Contributing and acknowledgements `gdt` was inspired by [Gabbi](https://github.com/cdent/gabbi), the excellent diff --git a/action.go b/action.go index ca4cfad..d823e8c 100644 --- a/action.go +++ b/action.go @@ -12,7 +12,6 @@ import ( "io" "os" "strings" - "testing" "github.com/gdt-dev/gdt/debug" gdterrors "github.com/gdt-dev/gdt/errors" @@ -93,23 +92,21 @@ func (a *Action) getCommand() string { // command is a List, `out` will be a `*unstructured.UnstructuredList`. 
func (a *Action) Do( ctx context.Context, - t *testing.T, c *connection, ns string, out *interface{}, ) error { cmd := a.getCommand() - debug.Println(ctx, "kube: %s [ns: %s]", cmd, ns) switch cmd { case "get": - return a.get(ctx, t, c, ns, out) + return a.get(ctx, c, ns, out) case "create": - return a.create(ctx, t, c, ns, out) + return a.create(ctx, c, ns, out) case "delete": - return a.delete(ctx, t, c, ns) + return a.delete(ctx, c, ns) case "apply": - return a.apply(ctx, t, c, ns, out) + return a.apply(ctx, c, ns, out) default: return fmt.Errorf("unknown command") } @@ -120,7 +117,6 @@ func (a *Action) Do( // `out` with the response value. func (a *Action) get( ctx context.Context, - t *testing.T, c *connection, ns string, out *interface{}, @@ -134,13 +130,13 @@ func (a *Action) get( return err } if name == "" { - list, err := a.doList(ctx, t, c, res, ns) + list, err := a.doList(ctx, c, res, ns) if err == nil { *out = list } return err } else { - obj, err := a.doGet(ctx, t, c, res, ns, name) + obj, err := a.doGet(ctx, c, res, ns, name) if err == nil { *out = obj } @@ -151,22 +147,33 @@ func (a *Action) get( // doList performs the List() call for a supplied resource kind func (a *Action) doList( ctx context.Context, - t *testing.T, c *connection, res schema.GroupVersionResource, ns string, ) (*unstructured.UnstructuredList, error) { + resName := res.Resource + labelSelString := "" opts := metav1.ListOptions{} withlabels := a.Get.Labels() if withlabels != nil { // We already validated the label selector during parse-time - opts.LabelSelector = labels.Set(withlabels).String() + labelsStr := labels.Set(withlabels).String() + labelSelString = fmt.Sprintf(" (labels: %s)", labelsStr) + opts.LabelSelector = labelsStr } if c.resourceNamespaced(res) { + debug.Println( + ctx, "kube.get: %s%s (ns: %s)", + resName, labelSelString, ns, + ) return c.client.Resource(res).Namespace(ns).List( ctx, opts, ) } + debug.Println( + ctx, "kube.get: %s%s (non-namespaced resource)", + resName, labelSelString, + ) return c.client.Resource(res).List( ctx, opts, ) @@ -175,19 +182,27 @@ func (a *Action) doList( // doGet performs the Get() call for a supplied resource kind and name func (a *Action) doGet( ctx context.Context, - t *testing.T, c *connection, res schema.GroupVersionResource, ns string, name string, ) (*unstructured.Unstructured, error) { + resName := res.Resource if c.resourceNamespaced(res) { + debug.Println( + ctx, "kube.get: %s/%s (ns: %s)", + resName, name, ns, + ) return c.client.Resource(res).Namespace(ns).Get( ctx, name, metav1.GetOptions{}, ) } + debug.Println( + ctx, "kube.get: %s/%s (non-namespaced resource)", + resName, name, + ) return c.client.Resource(res).Get( ctx, name, @@ -199,7 +214,6 @@ func (a *Action) doGet( // evaluates any assertions that have been set for the returned results. func (a *Action) create( ctx context.Context, - t *testing.T, c *connection, ns string, out *interface{}, @@ -244,6 +258,8 @@ func (a *Action) create( if err != nil { return err } + resName := res.Resource + debug.Println(ctx, "kube.create: %s (ns: %s)", resName, ons) obj, err := c.client.Resource(res).Namespace(ons).Create( ctx, obj, @@ -262,7 +278,6 @@ func (a *Action) create( // evaluates any assertions that have been set for the returned results. 
func (a *Action) apply( ctx context.Context, - t *testing.T, c *connection, ns string, out *interface{}, @@ -307,6 +322,8 @@ func (a *Action) apply( if err != nil { return err } + resName := res.Resource + debug.Println(ctx, "kube.apply: %s (ns: %s)", resName, ons) obj, err := c.client.Resource(res).Namespace(ns).Apply( ctx, // NOTE(jaypipes): Not sure why a separate name argument is @@ -332,7 +349,6 @@ func (a *Action) apply( // and evaluates any assertions that have been set for the returned results. func (a *Action) delete( ctx context.Context, - t *testing.T, c *connection, ns string, ) error { @@ -362,7 +378,7 @@ func (a *Action) delete( if ons == "" { ons = ns } - if err = a.doDelete(ctx, t, c, res, name, ns); err != nil { + if err = a.doDelete(ctx, c, res, name, ns); err != nil { return err } } @@ -378,20 +394,24 @@ func (a *Action) delete( return err } if name == "" { - return a.doDeleteCollection(ctx, t, c, res, ns) + return a.doDeleteCollection(ctx, c, res, ns) } - return a.doDelete(ctx, t, c, res, ns, name) + return a.doDelete(ctx, c, res, ns, name) } // doDelete performs the Delete() call on a kind and name func (a *Action) doDelete( ctx context.Context, - t *testing.T, c *connection, res schema.GroupVersionResource, ns string, name string, ) error { + resName := res.Resource + debug.Println( + ctx, "kube.delete: %s/%s (ns: %s)", + resName, name, ns, + ) return c.client.Resource(res).Namespace(ns).Delete( ctx, name, @@ -403,21 +423,28 @@ func (a *Action) doDelete( // resource kind func (a *Action) doDeleteCollection( ctx context.Context, - t *testing.T, c *connection, res schema.GroupVersionResource, ns string, ) error { - listOpts := metav1.ListOptions{} + opts := metav1.ListOptions{} withlabels := a.Delete.Labels() + labelSelString := "" if withlabels != nil { // We already validated the label selector during parse-time - listOpts.LabelSelector = labels.Set(withlabels).String() + labelsStr := labels.Set(withlabels).String() + labelSelString = fmt.Sprintf(" (labels: %s)", labelsStr) + opts.LabelSelector = labelsStr } + resName := res.Resource + debug.Println( + ctx, "kube.delete: %s%s (ns: %s)", + resName, labelSelString, ns, + ) return c.client.Resource(res).Namespace(ns).DeleteCollection( ctx, metav1.DeleteOptions{}, - listOpts, + opts, ) } diff --git a/eval.go b/eval.go index a635937..618e751 100644 --- a/eval.go +++ b/eval.go @@ -6,86 +6,35 @@ package kube import ( "context" - "testing" - "time" - "github.com/cenkalti/backoff/v4" - "github.com/gdt-dev/gdt/debug" gdterrors "github.com/gdt-dev/gdt/errors" "github.com/gdt-dev/gdt/result" - gdttypes "github.com/gdt-dev/gdt/types" -) - -const ( - // defaultGetTimeout is used as a retry max time if the spec's Timeout has - // not been specified. - defaultGetTimeout = time.Second * 5 ) // Eval performs an action and evaluates the results of that action, returning // a Result that informs the Scenario about what failed or succeeded. A new // Kubernetes client request is made during this call. 
-func (s *Spec) Eval(ctx context.Context, t *testing.T) *result.Result { +func (s *Spec) Eval(ctx context.Context) (*result.Result, error) { c, err := s.connect(ctx) if err != nil { - return result.New( - result.WithRuntimeError(ConnectError(err)), - ) + return nil, ConnectError(err) } - var a gdttypes.Assertions ns := s.Namespace() - // if the Spec has no timeout, default it to a reasonable value - var cancel context.CancelFunc - _, hasDeadline := ctx.Deadline() - if !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, defaultGetTimeout) - defer cancel() - } - - // retry the action and test the assertions until they succeed, there is a - // terminal failure, or the timeout expires. - bo := backoff.WithContext(backoff.NewExponentialBackOff(), ctx) - ticker := backoff.NewTicker(bo) - attempts := 0 - start := time.Now().UTC() - success := false - for tick := range ticker.C { - attempts++ - after := tick.Sub(start) - - var out interface{} - err := s.Kube.Do(ctx, t, c, ns, &out) - if err != nil { - if err == gdterrors.ErrTimeoutExceeded { - return result.New(result.WithFailures(gdterrors.ErrTimeoutExceeded)) - } - if err == gdterrors.RuntimeError { - return result.New(result.WithRuntimeError(err)) - } - } - a = newAssertions(c, s.Assert, err, out) - success = a.OK(ctx) - debug.Println( - ctx, "%s (try %d after %s) ok: %v", - s.Title(), attempts, after, success, - ) - if success { - ticker.Stop() - break + var out interface{} + err = s.Kube.Do(ctx, c, ns, &out) + if err != nil { + if err == gdterrors.ErrTimeoutExceeded { + return result.New(result.WithFailures(gdterrors.ErrTimeoutExceeded)), nil } - for _, f := range a.Failures() { - debug.Println( - ctx, "%s (try %d after %s) failure: %s", - s.Title(), attempts, after, f, - ) + if err == gdterrors.RuntimeError { + return nil, err } } - if !success { - for _, fail := range a.Failures() { - t.Error(fail) - } + a := newAssertions(c, s.Assert, err, out) + if a.OK(ctx) { + return result.New(), nil } - return result.New(result.WithFailures(a.Failures()...)) + return result.New(result.WithFailures(a.Failures()...)), nil } diff --git a/fixtures/kind/kind.go b/fixtures/kind/kind.go index 1982eda..8b6e5a7 100644 --- a/fixtures/kind/kind.go +++ b/fixtures/kind/kind.go @@ -57,7 +57,7 @@ type KindFixture struct { ConfigPath string } -func (f *KindFixture) Start(ctx context.Context) { +func (f *KindFixture) Start(ctx context.Context) error { ctx = gdtcontext.PushTrace(ctx, "fixtures.kind.start") defer func() { ctx = gdtcontext.PopTrace(ctx) @@ -68,7 +68,7 @@ func (f *KindFixture) Start(ctx context.Context) { if f.isRunning() { debug.Println(ctx, "cluster %s already running", f.ClusterName) f.runningBeforeStart = true - return + return nil } opts := []cluster.CreateOption{} if f.ConfigPath != "" { @@ -79,13 +79,14 @@ func (f *KindFixture) Start(ctx context.Context) { opts = append(opts, cluster.CreateWithConfigFile(f.ConfigPath)) } if err := f.provider.Create(f.ClusterName, opts...); err != nil { - panic(err) + return err } debug.Println(ctx, "cluster %s successfully created", f.ClusterName) if !f.retainOnStop { f.deleteOnStop = true debug.Println(ctx, "cluster %s will be deleted on stop", f.ClusterName) } + return nil } func (f *KindFixture) isRunning() bool { diff --git a/go.mod b/go.mod index 38d2b71..9e4abb0 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,7 @@ module github.com/gdt-dev/kube go 1.21 require ( - github.com/cenkalti/backoff/v4 v4.2.1 - github.com/gdt-dev/gdt v1.6.2 + github.com/gdt-dev/gdt v1.8.0 github.com/samber/lo v1.38.1 
github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v3 v3.0.1 @@ -19,6 +18,7 @@ require ( github.com/PaesslerAG/gval v1.0.0 // indirect github.com/PaesslerAG/jsonpath v0.1.1 // indirect github.com/alessio/shellescape v1.4.1 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect diff --git a/go.sum b/go.sum index 803368e..643a84b 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,8 @@ github.com/PaesslerAG/jsonpath v0.1.1 h1:c1/AToHQMVsduPAa4Vh6xp2U0evy4t8SWp8imEs github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -21,8 +21,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/gdt-dev/gdt v1.6.2 h1:ZHugSKIpMdO8hLQ1pGMPs0xGrLwQIutliDFfTjCkdys= -github.com/gdt-dev/gdt v1.6.2/go.mod h1:qkAfKZpEIYy4ymXcDvcZpfxgVvRDQTpSqeU/ze/EobU= +github.com/gdt-dev/gdt v1.8.0 h1:JWuRbvSmVdNMQ8qHw1LZqVsQZEL4oM3mr85Z1L48vwQ= +github.com/gdt-dev/gdt v1.8.0/go.mod h1:oph7/YpGDMhnOz2TkNyrllpAiwmGaMyFjUAXhAGNZHI= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/placement.go b/placement.go index 0274432..64deb53 100644 --- a/placement.go +++ b/placement.go @@ -9,7 +9,6 @@ import ( "fmt" "strings" - gdtcontext "github.com/gdt-dev/gdt/context" "github.com/gdt-dev/gdt/debug" "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/resource" @@ -126,10 +125,6 @@ func (a *assertions) placementSpreadOK( if len(topoKeys) == 0 { return true } - ctx = gdtcontext.PushTrace(ctx, "assert-placement-spread") - defer func() { - ctx = gdtcontext.PopTrace(ctx) - }() nodes := getNodes(ctx, a.c) domainNodes := map[string][]string{} for _, k := range topoKeys { @@ -164,20 +159,20 @@ func (a *assertions) placementSpreadOK( // the min and max number of pods on each domain is not greater than 1. 
 	for domain, nodes := range domainNodes {
 		debug.Println(
-			ctx, "domain: %s, unique nodes: %d",
+			ctx, "placement-spread: domain: %s, unique nodes: %d",
 			domain, len(nodes),
 		)
 		if len(nodes) > 0 {
 			nodeCounts := lo.Values(podDomains[domain])
 			debug.Println(
-				ctx, "domain: %s, pods per node: %d",
+				ctx, "placement-spread: domain: %s, pods per node: %d",
 				domain, nodeCounts,
 			)
 			minCount := lo.Min(nodeCounts)
 			maxCount := lo.Max(nodeCounts)
 			skew := maxCount - minCount
 			if skew > 1 {
 				msg := fmt.Sprintf(
 					"found uneven spread skew of %d for domain %s",
 					skew, domain,
 				)
diff --git a/plugin.go b/plugin.go
index 26999df..8cb097a 100644
--- a/plugin.go
+++ b/plugin.go
@@ -10,6 +10,13 @@ import (
 	"gopkg.in/yaml.v3"
 )
 
+var (
+	// DefaultTimeout is the default timeout used for each individual test
+	// spec. Note that gdt's top-level Scenario.Run handles all timeout and
+	// retry behaviour.
+	DefaultTimeout = "5s"
+)
+
 func init() {
 	gdt.RegisterPlugin(Plugin())
 }
@@ -23,6 +30,12 @@ type plugin struct{}
 
 func (p *plugin) Info() gdttypes.PluginInfo {
 	return gdttypes.PluginInfo{
 		Name: pluginName,
+		Retry: &gdttypes.Retry{
+			Exponential: true,
+		},
+		Timeout: &gdttypes.Timeout{
+			After: DefaultTimeout,
+		},
 	}
 }
diff --git a/testdata/placement-spread.yaml b/testdata/placement-spread.yaml
index 7e20cb2..5043be8 100644
--- a/testdata/placement-spread.yaml
+++ b/testdata/placement-spread.yaml
@@ -7,8 +7,7 @@ tests:
     kube:
       create: testdata/manifests/nginx-deployment-spread-zones.yaml
   - name: deployment-ready
-    timeout:
-      after: 40s
+    timeout: 40s
    kube:
       get: deployments/nginx-spread-zones
     assert:
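For readers unfamiliar with the placement-spread assertion in `placement.go`, the check reduces to comparing the minimum and maximum pod counts per node within each topology domain and tolerating a skew of at most 1. The standalone sketch below illustrates that rule only; it is not the plugin's implementation, and the node names and counts are invented.

```go
package main

import "fmt"

// spreadOK reports whether pods are evenly spread across the nodes of a
// single topology domain: the difference between the most- and least-loaded
// node (the "skew") must not be greater than 1.
func spreadOK(podsPerNode map[string]int) bool {
	if len(podsPerNode) == 0 {
		return true
	}
	first := true
	minCount, maxCount := 0, 0
	for _, n := range podsPerNode {
		if first {
			minCount, maxCount = n, n
			first = false
			continue
		}
		if n < minCount {
			minCount = n
		}
		if n > maxCount {
			maxCount = n
		}
	}
	return maxCount-minCount <= 1
}

func main() {
	// Hypothetical pod counts for nodes in one zone.
	even := map[string]int{"node-a": 2, "node-b": 2, "node-c": 1}
	uneven := map[string]int{"node-a": 3, "node-b": 1}
	fmt.Println(spreadOK(even))   // true: a skew of 1 is tolerated
	fmt.Println(spreadOK(uneven)) // false: a skew of 2 fails the assertion
}
```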