diff --git a/README.md b/README.md index 72da0a7..107fc84 100644 --- a/README.md +++ b/README.md @@ -540,6 +540,108 @@ test spec also contains these fields: [execspec]: https://github.com/gdt-dev/gdt/blob/2791e11105fd3c36d1f11a7d111e089be7cdc84c/exec/spec.go#L11-L34 [pipeexpect]: https://github.com/gdt-dev/gdt/blob/2791e11105fd3c36d1f11a7d111e089be7cdc84c/exec/assertions.go#L15-L26 +### Timeouts and retrying assertions + +When evaluating assertions for a test spec, `gdt` inspects the test's +`timeout` value to determine how long to retry the `get` call and recheck +the assertions. + +If a test's `timeout` is empty, `gdt` inspects the scenario's +`defaults.timeout` value. If both of those values are empty, `gdt` will look +for any default `timeout` value that the plugin uses. + +If you're interested in seeing the individual results of `gdt`'s +assertion-checks for a single `get` call, you can use the `gdt.WithDebug()` +function, like this test function demonstrates: + +file: `testdata/matches.yaml`: + +```yaml +name: matches +description: create a deployment and check the matches condition succeeds +fixtures: + - kind +tests: + - name: create-deployment + kube: + create: testdata/manifests/nginx-deployment.yaml + - name: deployment-exists + kube: + get: deployments/nginx + assert: + matches: + spec: + replicas: 2 + template: + metadata: + labels: + app: nginx + status: + readyReplicas: 2 + - name: delete-deployment + kube: + delete: deployments/nginx +``` + +file: `matches_test.go` + +```go +import ( + "github.com/gdt-dev/gdt" + _ "github.com/gdt-dev/kube" + kindfix "github.com/gdt-dev/kube/fixture/kind" +) + +func TestMatches(t *testing.T) { + fp := filepath.Join("testdata", "matches.yaml") + + kfix := kindfix.New() + + s, err := gdt.From(fp) + + ctx := gdt.NewContext(gdt.WithDebug()) + ctx = gdt.RegisterFixture(ctx, "kind", kfix) + s.Run(ctx, t) +} +``` + +Here's what running `go test -v matches_test.go` would look like: + +``` +$ go test -v matches_test.go +=== 
RUN TestMatches +=== RUN TestMatches/matches +=== RUN TestMatches/matches/create-deployment +=== RUN TestMatches/matches/deployment-exists +deployment-exists (try 1 after 1.303µs) ok: false, terminal: false +deployment-exists (try 1 after 1.303µs) failure: assertion failed: match field not equal: $.status.readyReplicas not present in subject +deployment-exists (try 2 after 595.62786ms) ok: false, terminal: false +deployment-exists (try 2 after 595.62786ms) failure: assertion failed: match field not equal: $.status.readyReplicas not present in subject +deployment-exists (try 3 after 1.020003807s) ok: false, terminal: false +deployment-exists (try 3 after 1.020003807s) failure: assertion failed: match field not equal: $.status.readyReplicas not present in subject +deployment-exists (try 4 after 1.760006109s) ok: false, terminal: false +deployment-exists (try 4 after 1.760006109s) failure: assertion failed: match field not equal: $.status.readyReplicas had different values. expected 2 but found 1 +deployment-exists (try 5 after 2.772416449s) ok: true, terminal: false +=== RUN TestMatches/matches/delete-deployment +--- PASS: TestMatches (3.32s) + --- PASS: TestMatches/matches (3.30s) + --- PASS: TestMatches/matches/create-deployment (0.01s) + --- PASS: TestMatches/matches/deployment-exists (2.78s) + --- PASS: TestMatches/matches/delete-deployment (0.02s) +PASS +ok command-line-arguments 3.683s +``` + +You can see from the debug output above that `gdt` created the Deployment and +then did a `kube.get` for the `deployments/nginx` Deployment. Initially +(attempt 1), the `assert.matches` assertion failed because the +`status.readyReplicas` field was not present in the returned resource. `gdt` +retried the `kube.get` call 4 more times (attempts 2-5), with attempts 2 and 3 +failing the existence check for the `status.readyReplicas` field and attempt 4 +failing the *value* check for the `status.readyReplicas` field being `1` +instead of the expected `2`. 
Finally, when the Deployment was completely rolled +out, attempt 5 succeeded in all the `assert.matches` assertions. + ## Contributing and acknowledgements `gdt` was inspired by [Gabbi](https://github.com/cdent/gabbi), the excellent diff --git a/context/context_test.go b/context/context_test.go index 6259926..7644948 100644 --- a/context/context_test.go +++ b/context/context_test.go @@ -43,8 +43,8 @@ func (s *fooSpec) UnmarshalYAML(node *yaml.Node) error { return nil } -func (s *fooSpec) Eval(ctx context.Context, t *testing.T) *result.Result { - return nil +func (s *fooSpec) Eval(ctx context.Context) (*result.Result, error) { + return nil, nil } type fooPlugin struct{} diff --git a/plugin/exec/action.go b/plugin/exec/action.go index f1b8ef2..d172ef0 100644 --- a/plugin/exec/action.go +++ b/plugin/exec/action.go @@ -8,7 +8,6 @@ import ( "bytes" "context" "os/exec" - "testing" gdtcontext "github.com/gdt-dev/gdt/context" "github.com/gdt-dev/gdt/debug" @@ -38,7 +37,6 @@ type Action struct { // respectively. func (a *Action) Do( ctx context.Context, - t *testing.T, outbuf *bytes.Buffer, errbuf *bytes.Buffer, exitcode *int, diff --git a/plugin/exec/eval.go b/plugin/exec/eval.go index 8897972..a02d3cf 100644 --- a/plugin/exec/eval.go +++ b/plugin/exec/eval.go @@ -7,7 +7,6 @@ package exec import ( "bytes" "context" - "testing" "github.com/gdt-dev/gdt/debug" gdterrors "github.com/gdt-dev/gdt/errors" @@ -17,17 +16,21 @@ import ( // Eval performs an action and evaluates the results of that action, returning // a Result that informs the Scenario about what failed or succeeded about the // Evaluable's conditions. -func (s *Spec) Eval(ctx context.Context, t *testing.T) *result.Result { +// +// Errors returned by Eval() are **RuntimeErrors**, not failures in assertions. 
+func (s *Spec) Eval( + ctx context.Context, +) (*result.Result, error) { outbuf := &bytes.Buffer{} errbuf := &bytes.Buffer{} var ec int - if err := s.Do(ctx, t, outbuf, errbuf, &ec); err != nil { + if err := s.Do(ctx, outbuf, errbuf, &ec); err != nil { if err == gdterrors.ErrTimeoutExceeded { - return result.New(result.WithFailures(gdterrors.ErrTimeoutExceeded)) + return result.New(result.WithFailures(gdterrors.ErrTimeoutExceeded)), nil } - return result.New(result.WithRuntimeError(ExecRuntimeError(err))) + return nil, ExecRuntimeError(err) } a := newAssertions(s.Assert, ec, outbuf, errbuf) if !a.OK(ctx) { @@ -35,12 +38,12 @@ func (s *Spec) Eval(ctx context.Context, t *testing.T) *result.Result { if s.On.Fail != nil { outbuf.Reset() errbuf.Reset() - err := s.On.Fail.Do(ctx, t, outbuf, errbuf, nil) + err := s.On.Fail.Do(ctx, outbuf, errbuf, nil) if err != nil { debug.Println(ctx, "error in on.fail.exec: %s", err) } } } } - return result.New(result.WithFailures(a.Failures()...)) + return result.New(result.WithFailures(a.Failures()...)), nil } diff --git a/plugin/registry_test.go b/plugin/registry_test.go index 24db5a7..b778f90 100644 --- a/plugin/registry_test.go +++ b/plugin/registry_test.go @@ -36,8 +36,8 @@ func (s *fooSpec) Base() *gdttypes.Spec { return &s.Spec } -func (s *fooSpec) Eval(context.Context, *testing.T) *result.Result { - return nil +func (s *fooSpec) Eval(context.Context) (*result.Result, error) { + return nil, nil } func (s *fooSpec) UnmarshalYAML(node *yaml.Node) error { diff --git a/result/result.go b/result/result.go index c281f25..8182460 100644 --- a/result/result.go +++ b/result/result.go @@ -4,13 +4,6 @@ package result -import ( - "errors" - "fmt" - - gdterrors "github.com/gdt-dev/gdt/errors" -) - // Result is returned from a `Evaluable.Eval` execution. 
It serves two // purposes: // @@ -23,9 +16,6 @@ import ( // returned in the Result and the `Scenario.Run` method injects that // information into the context that is supplied to the next Spec's `Run`. type Result struct { - // err is any error that was returned from the Evaluable's execution. This - // is guaranteed to be a `gdterrors.RuntimeError`. - err error // failures is the collection of error messages from assertion failures // that occurred during Eval(). These are *not* `gdterrors.RuntimeError`. failures []error @@ -36,17 +26,6 @@ type Result struct { data map[string]interface{} } -// HasRuntimeError returns true if the Eval() returned a runtime error, false -// otherwise. -func (r *Result) HasRuntimeError() bool { - return r.err != nil -} - -// RuntimeError returns the runtime error -func (r *Result) RuntimeError() error { - return r.err -} - // HasData returns true if any of the run data has been set, false otherwise. func (r *Result) HasData() bool { return r.data != nil @@ -86,19 +65,6 @@ func (r *Result) SetFailures(failures ...error) { type ResultModifier func(*Result) -// WithRuntimeError modifies the Result with the supplied error -func WithRuntimeError(err error) ResultModifier { - if !errors.Is(err, gdterrors.RuntimeError) { - msg := fmt.Sprintf("expected %s to be a gdterrors.RuntimeError", err) - // panic here because a plugin author incorrectly implemented their - // plugin Spec's Eval() method... 
- panic(msg) - } - return func(r *Result) { - r.err = err - } -} - // WithData modifies the Result with the supplied run data key and value func WithData(key string, val interface{}) ResultModifier { return func(r *Result) { diff --git a/scenario/run.go b/scenario/run.go index 36bb477..79152d6 100644 --- a/scenario/run.go +++ b/scenario/run.go @@ -7,6 +7,7 @@ package scenario import ( "context" "fmt" + "strconv" "strings" "testing" "time" @@ -52,9 +53,9 @@ func (s *Scenario) Run(ctx context.Context, t *testing.T) error { // collection, evaluate those first and if any failed, skip the scenario's // tests. for _, skipIf := range s.SkipIf { - res := skipIf.Eval(ctx, t) - if res.HasRuntimeError() { - return res.RuntimeError() + res, err := skipIf.Eval(ctx) + if err != nil { + return err } if len(res.Failures()) == 0 { t.Skipf( @@ -70,11 +71,19 @@ func (s *Scenario) Run(ctx context.Context, t *testing.T) error { ctx = gdtcontext.PopTrace(ctx) }() for idx, spec := range s.Tests { + sb := spec.Base() + wait := sb.Wait + if wait != nil && wait.Before != "" { + debug.Println(ctx, "wait: %s before", wait.Before) + time.Sleep(wait.BeforeDuration()) + } plugin := s.evalPlugins[idx] pinfo := plugin.Info() pretry := pinfo.Retry ptimeout := pinfo.Timeout + rt := getRetry(ctx, sb.Retry, scDefaults, pretry) + // Create a brand new context that inherits the top-level context's // cancel func. 
We want to set deadlines for each test spec and if // we mutate the single supplied top-level context, then only the @@ -82,13 +91,6 @@ func (s *Scenario) Run(ctx context.Context, t *testing.T) error { specCtx, specCancel := context.WithCancel(ctx) defer specCancel() - sb := spec.Base() - wait := sb.Wait - if wait != nil && wait.Before != "" { - debug.Println(ctx, "wait: %s before", wait.Before) - time.Sleep(wait.BeforeDuration()) - } - to := getTimeout(ctx, sb.Timeout, ptimeout, scDefaults) if to != nil { var cancel context.CancelFunc @@ -96,84 +98,13 @@ func (s *Scenario) Run(ctx context.Context, t *testing.T) error { defer cancel() } - var res *result.Result - rt := getRetry(ctx, sb.Retry, scDefaults, pretry) - if rt == nil { - // Just evaluate the test spec once - res = spec.Eval(specCtx, t) - if res.HasRuntimeError() { - rterr = res.RuntimeError() - t.Fatal(rterr) - } - debug.Println( - ctx, "run: single-shot (no retries) ok: %v", - !res.Failed(), - ) - } else { - // retry the action and test the assertions until they succeed, - // there is a terminal failure, or the timeout expires. - var bo backoff.BackOff - - if rt.Exponential { - bo = backoff.WithContext( - backoff.NewExponentialBackOff(), - ctx, - ) - } else { - interval := gdttypes.DefaultRetryConstantInterval - if rt.Interval != "" { - interval = rt.IntervalDuration() - } - bo = backoff.WithContext( - backoff.NewConstantBackOff(interval), - ctx, - ) - } - ticker := backoff.NewTicker(bo) - maxAttempts := 0 - if rt.Attempts != nil { - maxAttempts = *rt.Attempts - } - attempts := 1 - start := time.Now().UTC() - success := false - for tick := range ticker.C { - if (maxAttempts > 0) && (attempts > maxAttempts) { - debug.Println( - ctx, "run: exceeded max attempts %d. 
stopping.", - maxAttempts, - ) - ticker.Stop() - break - } - after := tick.Sub(start) - - res = spec.Eval(specCtx, t) - if res.HasRuntimeError() { - rterr = res.RuntimeError() - t.Fatal(rterr) - break - } - success = !res.Failed() - debug.Println( - ctx, "run: attempt %d after %s ok: %v", - attempts, after, success, - ) - if success { - ticker.Stop() - break - } - for _, f := range res.Failures() { - debug.Println( - ctx, "run: attempt %d after %s failure: %s", - attempts, after, f, - ) - } - attempts++ - } + res, rterr := s.runSpec(specCtx, rt, to, idx, spec) + if rterr != nil { + break } - for _, fail := range res.Failures() { - t.Error(fail) + if wait != nil && wait.After != "" { + debug.Println(ctx, "wait: %s after", wait.After) + time.Sleep(wait.AfterDuration()) } // Results can have arbitrary run data stored in them and we // save this prior run data in the top-level context (and pass @@ -181,15 +112,108 @@ func (s *Scenario) Run(ctx context.Context, t *testing.T) error { if res.HasData() { ctx = gdtcontext.StorePriorRun(ctx, res.Data()) } - if wait != nil && wait.After != "" { - debug.Println(ctx, "wait: %s after", wait.After) - time.Sleep(wait.AfterDuration()) + for _, fail := range res.Failures() { + t.Error(fail) } } }) return rterr } +// runSpec executes an individual test spec +func (s *Scenario) runSpec( + ctx context.Context, + retry *gdttypes.Retry, + timeout *gdttypes.Timeout, + idx int, + spec gdttypes.Evaluable, +) (*result.Result, error) { + sb := spec.Base() + specTraceMsg := strconv.Itoa(idx) + if sb.Name != "" { + specTraceMsg += ":" + sb.Name + } + ctx = gdtcontext.PushTrace(ctx, specTraceMsg) + defer func() { + ctx = gdtcontext.PopTrace(ctx) + }() + if retry == nil { + // Just evaluate the test spec once + res, err := spec.Eval(ctx) + if err != nil { + return nil, err + } + debug.Println( + ctx, "run: single-shot (no retries) ok: %v", + !res.Failed(), + ) + return res, nil + } + + // retry the action and test the assertions until they 
succeed, + // there is a terminal failure, or the timeout expires. + var bo backoff.BackOff + var res *result.Result + var err error + + if retry.Exponential { + bo = backoff.WithContext( + backoff.NewExponentialBackOff(), + ctx, + ) + } else { + interval := gdttypes.DefaultRetryConstantInterval + if retry.Interval != "" { + interval = retry.IntervalDuration() + } + bo = backoff.WithContext( + backoff.NewConstantBackOff(interval), + ctx, + ) + } + ticker := backoff.NewTicker(bo) + maxAttempts := 0 + if retry.Attempts != nil { + maxAttempts = *retry.Attempts + } + attempts := 1 + start := time.Now().UTC() + success := false + for tick := range ticker.C { + if (maxAttempts > 0) && (attempts > maxAttempts) { + debug.Println( + ctx, "run: exceeded max attempts %d. stopping.", + maxAttempts, + ) + ticker.Stop() + break + } + after := tick.Sub(start) + + res, err = spec.Eval(ctx) + if err != nil { + return nil, err + } + success = !res.Failed() + debug.Println( + ctx, "run: attempt %d after %s ok: %v", + attempts, after, success, + ) + if success { + ticker.Stop() + break + } + for _, f := range res.Failures() { + debug.Println( + ctx, "run: attempt %d after %s failure: %s", + attempts, after, f, + ) + } + attempts++ + } + return res, nil +} + // getTimeout returns the timeout value for the test spec. If the spec has a // timeout override, we use that. Otherwise, we inspect the scenario's defaults // and, if present, use that timeout. 
If the scenario's defaults for not diff --git a/scenario/run_test.go b/scenario/run_test.go index e3a1432..a1030f1 100644 --- a/scenario/run_test.go +++ b/scenario/run_test.go @@ -137,7 +137,7 @@ func TestNoRetry(t *testing.T) { w.Flush() require.NotEqual(b.Len(), 0) debugout := b.String() - require.Contains(debugout, "[gdt] [no-retry] run: single-shot (no retries) ok: true") + require.Contains(debugout, "[gdt] [no-retry/0:bar] run: single-shot (no retries) ok: true") } func TestFailRetryTestOverride(t *testing.T) { @@ -173,7 +173,7 @@ func TestRetryTestOverride(t *testing.T) { require.NotNil(err) debugout := string(outerr) - require.Contains(debugout, "[gdt] [retry-test-override] run: exceeded max attempts 2. stopping.") + require.Contains(debugout, "[gdt] [retry-test-override/0:baz] run: exceeded max attempts 2. stopping.") } func TestSkipIf(t *testing.T) { diff --git a/scenario/stub_plugins_test.go b/scenario/stub_plugins_test.go index f702bda..f49b336 100644 --- a/scenario/stub_plugins_test.go +++ b/scenario/stub_plugins_test.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "strconv" - "testing" gdtcontext "github.com/gdt-dev/gdt/context" "github.com/gdt-dev/gdt/debug" @@ -18,7 +17,6 @@ import ( "github.com/gdt-dev/gdt/result" gdttypes "github.com/gdt-dev/gdt/types" "github.com/samber/lo" - "github.com/stretchr/testify/assert" "gopkg.in/yaml.v3" ) @@ -84,12 +82,8 @@ func (s *failSpec) Base() *gdttypes.Spec { return &s.Spec } -func (s *failSpec) Eval(context.Context, *testing.T) *result.Result { - return result.New( - result.WithRuntimeError( - fmt.Errorf("%w: Indy, bad dates!", gdterrors.RuntimeError), - ), - ) +func (s *failSpec) Eval(context.Context) (*result.Result, error) { + return nil, fmt.Errorf("%w: Indy, bad dates!", gdterrors.RuntimeError) } func (s *failSpec) UnmarshalYAML(node *yaml.Node) error { @@ -220,28 +214,19 @@ func (s *fooSpec) UnmarshalYAML(node *yaml.Node) error { return nil } -func (s *fooSpec) Eval(ctx context.Context, t *testing.T) 
*result.Result { +func (s *fooSpec) Eval(ctx context.Context) (*result.Result, error) { fails := []error{} - t.Run(s.Title(), func(t *testing.T) { - ctx = gdtcontext.PushTrace(ctx, s.Title()) - defer func() { - ctx = gdtcontext.PopTrace(ctx) - }() - debug.Println(ctx, "in %s Foo=%s", s.Title(), s.Foo) - // This is just a silly test to demonstrate how to write Eval() methods - // for plugin Spec specialization classes. - if s.Name == "bar" && s.Foo != "bar" { - fail := fmt.Errorf("expected s.Foo = 'bar', got %s", s.Foo) - fails = append(fails, fail) - } else if s.Name != "bar" && s.Foo != "baz" { - fail := fmt.Errorf("expected s.Foo = 'baz', got %s", s.Foo) - fails = append(fails, fail) - } - }) - for _, fail := range fails { - t.Error(fail) + debug.Println(ctx, "in %s Foo=%s", s.Title(), s.Foo) + // This is just a silly test to demonstrate how to write Eval() methods + // for plugin Spec specialization classes. + if s.Name == "bar" && s.Foo != "bar" { + fail := fmt.Errorf("expected s.Foo = 'bar', got %s", s.Foo) + fails = append(fails, fail) + } else if s.Name != "bar" && s.Foo != "baz" { + fail := fmt.Errorf("expected s.Foo = 'baz', got %s", s.Foo) + fails = append(fails, fail) } - return result.New(result.WithFailures(fails...)) + return result.New(result.WithFailures(fails...)), nil } type fooPlugin struct{} @@ -281,8 +266,8 @@ func (s *barSpec) Base() *gdttypes.Spec { return &s.Spec } -func (s *barSpec) Eval(context.Context, *testing.T) *result.Result { - return result.New() +func (s *barSpec) Eval(context.Context) (*result.Result, error) { + return result.New(), nil } func (s *barSpec) UnmarshalYAML(node *yaml.Node) error { @@ -390,21 +375,28 @@ func (s *priorRunSpec) UnmarshalYAML(node *yaml.Node) error { return nil } -func (s *priorRunSpec) Eval(ctx context.Context, t *testing.T) *result.Result { - t.Run(s.Title(), func(t *testing.T) { - assert := assert.New(t) - // Here we test that the prior run data that we save at the end of each - // Run() is showing up 
properly in the next Run()'s context. - prData := gdtcontext.PriorRun(ctx) - if s.Index == 0 { - assert.Empty(prData) - } else { - assert.Contains(prData, priorRunDataKey) - assert.IsType(prData[priorRunDataKey], "") - assert.Equal(s.Prior, prData[priorRunDataKey]) +func (s *priorRunSpec) Eval(ctx context.Context) (*result.Result, error) { + // Here we test that the prior run data that we save at the end of each + // Run() is showing up properly in the next Run()'s context. + fails := []error{} + prData := gdtcontext.PriorRun(ctx) + if s.Index == 0 { + if len(prData) != 0 { + fails = append(fails, fmt.Errorf("expected prData to be empty")) + } + } else { + data, ok := prData[priorRunDataKey] + if !ok { + fails = append(fails, fmt.Errorf("expected priorRunDataKey in priorRun map")) } - }) - return result.New(result.WithData(priorRunDataKey, s.State)) + if s.Prior != data { + fails = append(fails, fmt.Errorf("expected priorRunData == data")) + } + } + return result.New( + result.WithFailures(fails...), + result.WithData(priorRunDataKey, s.State), + ), nil } type priorRunPlugin struct{} diff --git a/types/evaluable.go b/types/evaluable.go index cb2450b..eeca127 100644 --- a/types/evaluable.go +++ b/types/evaluable.go @@ -6,7 +6,6 @@ package types import ( "context" - "testing" "github.com/gdt-dev/gdt/result" ) @@ -16,7 +15,10 @@ type Evaluable interface { // Eval performs an action and evaluates the results of that action, // returning a Result that informs the Scenario about what failed or // succeeded about the Evaluable's conditions. - Eval(context.Context, *testing.T) *result.Result + // + // Errors returned by Eval() are **RuntimeErrors**, not failures in + // assertions. + Eval(context.Context) (*result.Result, error) // SetBase sets the Evaluable's base Spec SetBase(Spec) // Base returns the Evaluable's base Spec