From 5ad73ef8f7a456faa38296f6c2ff5b5958ba2153 Mon Sep 17 00:00:00 2001 From: Quintush <2246480+quintush@users.noreply.github.com> Date: Wed, 15 Apr 2020 14:10:07 +0200 Subject: [PATCH] Update documentation with missing assertions, remove duplication code for formatter testing. --- DOCUMENT.md | 20 +- unittest/junit_report_xml_test.go | 247 +++++++----------- unittest/nunit_report_xml.go | 2 +- unittest/nunit_report_xml_test.go | 395 ++++++++++------------------ unittest/test_job_test.go | 26 +- unittest/test_runner_test.go | 40 +-- unittest/test_suite_test.go | 34 +-- unittest/utils.go | 3 + unittest/utils_test.go | 77 ++++++ unittest/validators/common_test.go | 9 + unittest/valueutils/valueutils.go | 8 +- unittest/xunit_report_xml.go | 28 +- unittest/xunit_report_xml_test.go | 403 +++++++++-------------------- 13 files changed, 537 insertions(+), 755 deletions(-) create mode 100644 unittest/utils_test.go diff --git a/DOCUMENT.md b/DOCUMENT.md index 75f70077..7db3dfa4 100644 --- a/DOCUMENT.md +++ b/DOCUMENT.md @@ -16,7 +16,7 @@ A test suite is a collection of tests with the same purpose and scope defined in suite: test deploy and service templates: - deployment.yaml - - service.yaml + - web/service.yaml tests: - it: should test something ... @@ -24,7 +24,7 @@ tests: - **suite**: *string, optional*. The suite name to show on test result output. -- **templates**: *array of string, recommended*. The template files scope to test in this suite, only the ones specified here is rendered during testing. If omitted, all template files are rendered. File suffixed with `.tpl` is added automatically, you don't need to add them again. +- **templates**: *array of string, recommended*. The template files scope to test in this suite, only the ones specified here is rendered during testing. If omitted, all template files are rendered. Template files that are put in a templates sub-folder can be addressed with a linux path separator. 
File suffixed with `.tpl` is added automatically even if it is in a templates sub-folder, you don't need to add them again. - **tests**: *array of test job, required*. Where you define your test jobs to run, check [Test Job](#test-job). @@ -62,7 +62,7 @@ tests: - **set**: *object of any, optional*. Set the values directly in suite file. The key is the value path with the format just like `--set` option of `helm install`, for example `image.pullPolicy`. The value is anything you want to set to the path specified by the key, which can be even an array or an object. -- **template**: *string, optional*. The template file which render the manifest to be tested, default to the list of template file defined in `templates` of suite file, unless the template is defined in the assertions (see Assertion). +- **template**: *string, optional*. The template file which render the manifest to be tested, default to the list of template file defined in `templates` of suite file, unless the template is defined in the assertions (check [Assertion](#assertion)). - **documentIndex**: *int, optional*. The index of rendered documents (devided by `---`) to be tested, default to -1, which results in asserting all documents (see Assertion). Generally you can ignored this field if the template file render only one document. @@ -81,7 +81,7 @@ Define assertions in the test job to validate the manifests rendered with values ```yaml templates: - deployment.yaml - - service.yaml + - web/service.yaml tests: - it: should pass asserts: @@ -92,7 +92,7 @@ tests: path: metadata.name value: your-service not: true - template: service.yaml + template: web/service.yaml documentIndex: 0 ``` @@ -111,11 +111,15 @@ Available assertion types are listed below: | Assertion Type | Parameters | Description | Example | |----------------|------------|-------------|---------| | `equal` | **path**: *string*. The `set` path to assert.
**value**: *any*. The expected value. | Assert the value of specified **path** equal to the **value**. |
equal:
path: metadata.name
value: my-deploy
| +| `equalRaw` |
**value**: *string*. Assert the expected value in a NOTES.txt file. | Assert equal to the **value**. |
equalRaw:
value: my-deploy
| | `notEqual` | **path**: *string*. The `set` path to assert.
**value**: *any*. The value expected not to be. | Assert the value of specified **path** NOT equal to the **value**. |
notEqual:
path: metadata.name
value: my-deploy
| +| `notEqualRaw` |
**value**: *string*. The value the NOTES.txt file is expected NOT to be. | Assert NOT equal to the **value**. |<br/>notEqualRaw:
value: my-deploy
| | `matchRegex` | **path**: *string*. The `set` path to assert, the value must be a *string*.
**pattern**: *string*. The regex pattern to match (without quoting `/`). | Assert the value of specified **path** match **pattern**. |
matchRegex:
path: metadata.name
pattern: -my-chart$
| +| `matchRegexRaw` | **pattern**: *string*. The regex pattern to match (without quoting `/`) in a NOTES.txt file. | Assert the value match **pattern**. |
matchRegexRaw:
pattern: -my-notes$
| | `notMatchRegex` | **path**: *string*. The `set` path to assert, the value must be a *string*.
**pattern**: *string*. The regex pattern NOT to match (without quoting `/`). | Assert the value of specified **path** NOT match **pattern**. |
notMatchRegex:
path: metadata.name
pattern: -my-chat$
| -| `contains` | **path**: *string*. The `set` path to assert, the value must be an *array*.
**content**: *any*. The content to be contained.
**count**: *int, optional*. The count of content to be contained.
**any**: *bool, optional*. ignores any other values within the found content. | Assert the array as the value of specified **path** contains the **content**. |
contains:
path: spec.ports
content:
name: web
port: 80
targetPort: 80
protocle:TCP
| -| `notContains` | **path**: *string*. The `set` path to assert, the value must be an *array*.
**content**: *any*. The content NOT to be contained. | Assert the array as the value of specified **path** NOT contains the **content**. |
notContains:
path: spec.ports
content:
name: server
port: 80
targetPort: 80
protocle: TCP
| +| `notMatchRegexRaw` | **pattern**: *string*. The regex pattern NOT to match (without quoting `/`) in a NOTES.txt file. | Assert the value NOT match **pattern**. |
notMatchRegexRaw:
pattern: -my-notes$
| +| `contains` | **path**: *string*. The `set` path to assert, the value must be an *array*.
**content**: *any*. The content to be contained.
**count**: *int, optional*. The count of content to be contained.
**any**: *bool, optional*. ignores any other values within the found content. | Assert the array as the value of specified **path** contains the **content**. |
contains:
path: spec.ports
content:
name: web
port: 80
targetPort: 80
protocol: TCP<br/>

contains:
path: spec.ports
content:
name: web
count: 1
any: true
| +| `notContains` | **path**: *string*. The `set` path to assert, the value must be an *array*.
**content**: *any*. The content NOT to be contained. | Assert the array as the value of specified **path** NOT contains the **content**. |
notContains:
path: spec.ports
content:
name: server
port: 80
targetPort: 80
protocol: TCP<br/>

notContains:<br/>
path: spec.ports
content:
name: web
count: 1
any: true
| | `isNull` | **path**: *string*. The `set` path to assert. | Assert the value of specified **path** is `null`. |
isNull:
path: spec.strategy
| | `isNotNull` | **path**: *string*. The `set` path to assert. | Assert the value of specified **path** is NOT `null`. |
isNotNull:
path: spec.replicas
| | `isEmpty` | **path**: *string*. The `set` path to assert. | Assert the value of specified **path** is empty (`null`, `""`, `0`, `[]`, `{}`). |
isEmpty:
path: spec.tls
| @@ -124,7 +128,7 @@ Available assertion types are listed below: | `isAPIVersion` | **of**: *string*. Expected `apiVersion` of manifest. | Assert the `apiVersion` value **of** manifest, is equilevant to:
equal:
path: apiVersion
value: ...
|
isAPIVersion:
of: v2
| | `hasDocuments` | **count**: *int*. Expected count of documents rendered. | Assert the documents count rendered by the `template` specified. The `documentIndex` option is ignored here. |
hasDocuments:
count: 2
| | `matchSnapshot` | **path**: *string*. The `set` path for snapshot. | Assert the value of **path** is the same as snapshotted last time. Check [doc](./README.md#snapshot-testing) below. |
matchSnapshot:
path: spec
| - +| `matchSnapshotRaw` | | Assert the value in the NOTES.txt is the same as snapshotted last time. Check [doc](./README.md#snapshot-testing) below. |
matchSnapshotRaw: {}
| ### Antonym and `not` Notice that there are some antonym assertions, the following two assertions actually have same effect: diff --git a/unittest/junit_report_xml_test.go b/unittest/junit_report_xml_test.go index 4e99a7fa..d173e695 100644 --- a/unittest/junit_report_xml_test.go +++ b/unittest/junit_report_xml_test.go @@ -4,7 +4,6 @@ import ( "encoding/xml" "fmt" "io/ioutil" - "os" "path" "testing" @@ -12,7 +11,91 @@ import ( "github.com/stretchr/testify/assert" ) -var tmpJUnitTestDir, _ = ioutil.TempDir("", "_suite_tests") +var tmpJUnitTestDir, _ = ioutil.TempDir("", testSuiteTests) + +func createJUnitTestCase(classname, name, failureContent string) JUnitTestCase { + testCase := JUnitTestCase{ + Classname: classname, + Name: name, + } + + if len(failureContent) > 0 { + testCase.Failure = &JUnitFailure{ + Message: "Failed", + Type: "", + Contents: failureContent, + } + } + + return testCase +} + +func createJUnitProperty(name, value string) JUnitProperty { + return JUnitProperty{ + Name: name, + Value: value, + } +} + +func assertJUnitTestSuite(assert *assert.Assertions, expected, actual []JUnitTestSuite) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Tests, actual[i].Tests) + assert.Equal(expected[i].Failures, actual[i].Failures) + assert.Equal(expected[i].Name, actual[i].Name) + + assertJUnitProperty(assert, expected[i].Properties, actual[i].Properties) + assertJUnitTestCase(assert, expected[i].TestCases, actual[i].TestCases) + } + } else { + // Verify if both are nil, otherwise it's still a failure. 
+ assert.True(expected == nil && actual == nil) + } +} + +func assertJUnitTestCase(assert *assert.Assertions, expected, actual []JUnitTestCase) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Classname, actual[i].Classname) + assert.Equal(expected[i].Name, actual[i].Name) + + if expected[i].Failure != nil && actual[i].Failure != nil { + assert.Equal(expected[i].Failure.Message, actual[i].Failure.Message) + assert.Equal(expected[i].Failure.Type, actual[i].Failure.Type) + assert.Equal(expected[i].Failure.Contents, actual[i].Failure.Contents) + } else { + assert.True(expected[i].Failure == nil && actual[i].Failure == nil) + } + } + } else { + // Verify if both are nil, otherwise it's still a failure. + assert.True(expected == nil && actual == nil) + } +} + +func assertJUnitProperty(assert *assert.Assertions, expected, actual []JUnitProperty) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Name, actual[i].Name) + assert.Equal(expected[i].Value, actual[i].Value) + } + } else { + // Verify if both are nil, otherwise it's still a failure. 
+ assert.True(expected == nil && actual == nil) + } +} func TestWriteTestOutputAsJUnitMinimalSuccess(t *testing.T) { assert := assert.New(t) @@ -27,16 +110,10 @@ func TestWriteTestOutputAsJUnitMinimalSuccess(t *testing.T) { Failures: 0, Name: testSuiteDisplayName, Properties: []JUnitProperty{ - { - Name: "helm-unittest.version", - Value: "1.6", - }, + createJUnitProperty("helm-unittest.version", "1.6"), }, TestCases: []JUnitTestCase{ - { - Classname: testSuiteDisplayName, - Name: testCaseDisplayName, - }, + createJUnitTestCase(testSuiteDisplayName, testCaseDisplayName, ""), }, }, }, @@ -48,39 +125,18 @@ func TestWriteTestOutputAsJUnitMinimalSuccess(t *testing.T) { FilePath: outputFile, Passed: true, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseDisplayName, - Passed: true, - }, + createTestJobResult(testCaseDisplayName, "", true, nil), }, }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewJUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. 
- testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual JUnitTestSuites xml.Unmarshal(bytevalue, &actual) assertJUnitTestSuite(assert, expected.Suites, actual.Suites) - - testResult.Close() - os.Remove(outputFile) } func TestWriteTestOutputAsJUnitWithFailures(t *testing.T) { @@ -101,142 +157,37 @@ func TestWriteTestOutputAsJUnitWithFailures(t *testing.T) { Failures: 1, Name: testSuiteDisplayName, Properties: []JUnitProperty{ - { - Name: "helm-unittest.version", - Value: "1.6", - }, + createJUnitProperty("helm-unittest.version", "1.6"), }, TestCases: []JUnitTestCase{ - { - Classname: testSuiteDisplayName, - Name: testCaseSuccessDisplayName, - }, - { - Classname: testSuiteDisplayName, - Name: testCaseFailureDisplayName, - Failure: &JUnitFailure{ - Message: "Failed", - Type: "", - Contents: failureContent, - }, - }, + createJUnitTestCase(testSuiteDisplayName, testCaseSuccessDisplayName, ""), + createJUnitTestCase(testSuiteDisplayName, testCaseFailureDisplayName, failureContent), }, }, }, } + assertionResults := []*AssertionResult{ + createAssertionResult(0, false, false, assertionType, assertionFailure, ""), + } + given := []*TestSuiteResult{ { DisplayName: testSuiteDisplayName, FilePath: outputFile, Passed: true, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseSuccessDisplayName, - Passed: true, - }, - { - DisplayName: testCaseFailureDisplayName, - Passed: false, - AssertsResult: []*AssertionResult{ - { - Index: 0, - FailInfo: []string{ - assertionFailure, - }, - Passed: false, - AssertType: assertionType, - Not: false, - }, - }, - }, + createTestJobResult(testCaseSuccessDisplayName, "", true, nil), + createTestJobResult(testCaseFailureDisplayName, "", false, assertionResults), }, }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewJUnitReportXML() - serr := 
sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. - testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual JUnitTestSuites xml.Unmarshal(bytevalue, &actual) assertJUnitTestSuite(assert, expected.Suites, actual.Suites) - - testResult.Close() - os.Remove(outputFile) -} - -func assertJUnitTestSuite(assert *assert.Assertions, expected, actual []JUnitTestSuite) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Tests, actual[i].Tests) - assert.Equal(expected[i].Failures, actual[i].Failures) - assert.Equal(expected[i].Name, actual[i].Name) - - assertJUnitProperty(assert, expected[i].Properties, actual[i].Properties) - assertJUnitTestCase(assert, expected[i].TestCases, actual[i].TestCases) - } - } else { - // Verify if both are nil, otherwise it's still a failure. 
- assert.True(expected == nil && actual == nil) - } -} - -func assertJUnitTestCase(assert *assert.Assertions, expected, actual []JUnitTestCase) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Classname, actual[i].Classname) - assert.Equal(expected[i].Name, actual[i].Name) - - if expected[i].Failure != nil && actual[i].Failure != nil { - assert.Equal(expected[i].Failure.Message, actual[i].Failure.Message) - assert.Equal(expected[i].Failure.Type, actual[i].Failure.Type) - assert.Equal(expected[i].Failure.Contents, actual[i].Failure.Contents) - } else { - assert.True(expected[i].Failure == nil && actual[i].Failure == nil) - } - } - } else { - // Verify if both are nil, otherwise it's still a failure. - assert.True(expected == nil && actual == nil) - } -} - -func assertJUnitProperty(assert *assert.Assertions, expected, actual []JUnitProperty) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Name, actual[i].Name) - assert.Equal(expected[i].Value, actual[i].Value) - } - } else { - // Verify if both are nil, otherwise it's still a failure. 
- assert.True(expected == nil && actual == nil) - } } diff --git a/unittest/nunit_report_xml.go b/unittest/nunit_report_xml.go index 6ddc58ea..f97775b1 100644 --- a/unittest/nunit_report_xml.go +++ b/unittest/nunit_report_xml.go @@ -222,7 +222,7 @@ func (j *nUnitReportXML) WriteTestOutput(testSuiteResults []*TestSuiteResult, no CurrentUICulture: currentUICulture, }, TestSuite: testSuites, - Name: "Helm-Unittest", + Name: TestFramework, Total: totalTests, Errors: totalErrors, Failures: totalFailures, diff --git a/unittest/nunit_report_xml_test.go b/unittest/nunit_report_xml_test.go index c641d134..558e8972 100644 --- a/unittest/nunit_report_xml_test.go +++ b/unittest/nunit_report_xml_test.go @@ -4,7 +4,6 @@ import ( "encoding/xml" "fmt" "io/ioutil" - "os" "path" "testing" @@ -12,7 +11,87 @@ import ( "github.com/stretchr/testify/assert" ) -var tmpNunitTestDir, _ = ioutil.TempDir("", "_suite_tests") +var tmpNunitTestDir, _ = ioutil.TempDir("", testSuiteTests) + +func createNUnitTestCase(name, description, failureContent string, executed bool) NUnitTestCase { + testCase := NUnitTestCase{ + Name: name, + Description: description, + Success: "true", + Asserts: "0", + Result: "Success", + } + + if len(failureContent) > 0 { + testCase.Failure = &NUnitFailure{ + Message: "Failed", + StackTrace: failureContent, + } + testCase.Success = "false" + testCase.Result = "Failed" + } + + if executed { + testCase.Executed = "true" + } else { + testCase.Executed = "false" + } + + return testCase +} + +func validateNUnitTestSuite(assert *assert.Assertions, expected, actual []NUnitTestSuite) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Name, actual[i].Name) + assert.Equal(expected[i].Description, actual[i].Description) + assert.Equal(expected[i].Success, actual[i].Success) + assert.Equal(expected[i].Executed, actual[i].Executed) + 
assert.Equal(expected[i].Result, actual[i].Result) + + // Validate the testcases + validatNUnitTestCase(assert, expected[i].TestCases, actual[i].TestCases) + + // Recursive validation loop. + validateNUnitTestSuite(assert, expected[i].TestSuites, actual[i].TestSuites) + } + } else { + // Verify if both are nil, otherwise it's still a failure. + assert.True(expected == nil && actual == nil) + } +} + +func validatNUnitTestCase(assert *assert.Assertions, expected, actual []NUnitTestCase) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Name, actual[i].Name) + assert.Equal(expected[i].Description, actual[i].Description) + assert.Equal(expected[i].Success, actual[i].Success) + assert.Equal(expected[i].Asserts, actual[i].Asserts) + assert.Equal(expected[i].Executed, actual[i].Executed) + assert.Equal(expected[i].Result, actual[i].Result) + + if expected[i].Failure != nil || actual[i].Failure != nil { + assert.Equal(expected[i].Failure.Message, actual[i].Failure.Message) + assert.Equal(expected[i].Failure.StackTrace, actual[i].Failure.StackTrace) + } else { + // Verify if both are nil, otherwise it's still a failure. + assert.True(expected[i].Failure == nil && actual[i].Failure == nil) + } + } + } else { + // Verify if both are nil, otherwise it's still a failure. 
+ assert.True(expected == nil && actual == nil) + } +} func TestWriteTestOutputAsNUnitMinimalSuccess(t *testing.T) { assert := assert.New(t) @@ -32,19 +111,16 @@ func TestWriteTestOutputAsNUnitMinimalSuccess(t *testing.T) { Executed: "true", Result: "Success", TestCases: []NUnitTestCase{ - { - Failure: nil, - Name: testCaseDisplayName, - Description: fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseDisplayName), - Success: "true", - Executed: "true", - Asserts: "0", - Result: "Success", - }, + createNUnitTestCase( + testCaseDisplayName, + fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseDisplayName), + "", + true, + ), }, }, }, - Name: "Helm-Unittest", + Name: TestFramework, Total: 1, Errors: 0, } @@ -55,31 +131,13 @@ func TestWriteTestOutputAsNUnitMinimalSuccess(t *testing.T) { FilePath: outputFile, Passed: true, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseDisplayName, - Passed: true, - }, + createTestJobResult(testCaseDisplayName, "", true, nil), }, }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewNUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. 
- testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual NUnitTestResults xml.Unmarshal(bytevalue, &actual) @@ -88,9 +146,6 @@ func TestWriteTestOutputAsNUnitMinimalSuccess(t *testing.T) { assert.Equal(expected.Errors, actual.Errors) assert.Equal(expected.Failures, actual.Failures) validateNUnitTestSuite(assert, expected.TestSuite, actual.TestSuite) - - testResult.Close() - os.Remove(outputFile) } func TestWriteTestOutputAsNUnitWithFailures(t *testing.T) { @@ -116,82 +171,45 @@ func TestWriteTestOutputAsNUnitWithFailures(t *testing.T) { Executed: "true", Result: "Failed", TestCases: []NUnitTestCase{ - { - Failure: nil, - Name: testCaseSuccessDisplayName, - Description: fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseSuccessDisplayName), - Success: "true", - Executed: "true", - Asserts: "0", - Result: "Success", - }, - { - Failure: &NUnitFailure{ - Message: "Failed", - StackTrace: failureContent, - }, - Name: testCaseFailureDisplayName, - Description: fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseFailureDisplayName), - Success: "false", - Executed: "true", - Asserts: "0", - Result: "Failed", - }, + createNUnitTestCase( + testCaseSuccessDisplayName, + fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseSuccessDisplayName), + "", + true, + ), + createNUnitTestCase( + testCaseFailureDisplayName, + fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseFailureDisplayName), + failureContent, + true, + ), }, }, }, - Name: "Helm-Unittest", + Name: TestFramework, Total: 2, Errors: 0, Failures: 1, } + assertionResults := []*AssertionResult{ + createAssertionResult(0, false, false, assertionType, assertionFailure, ""), + } + given := []*TestSuiteResult{ { DisplayName: testSuiteDisplayName, FilePath: outputFile, Passed: false, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseSuccessDisplayName, - Passed: true, - }, - { - DisplayName: 
testCaseFailureDisplayName, - Passed: false, - AssertsResult: []*AssertionResult{ - { - Index: 0, - FailInfo: []string{ - assertionFailure, - }, - Passed: false, - AssertType: assertionType, - Not: false, - }, - }, - }, + createTestJobResult(testCaseSuccessDisplayName, "", true, nil), + createTestJobResult(testCaseFailureDisplayName, "", false, assertionResults), }, }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewNUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. - testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual NUnitTestResults xml.Unmarshal(bytevalue, &actual) @@ -200,9 +218,6 @@ func TestWriteTestOutputAsNUnitWithFailures(t *testing.T) { assert.Equal(expected.Errors, actual.Errors) assert.Equal(expected.Failures, actual.Failures) validateNUnitTestSuite(assert, expected.TestSuite, actual.TestSuite) - - testResult.Close() - os.Remove(outputFile) } func TestWriteTestOutputAsNUnitWithFailuresAndErrors(t *testing.T) { @@ -231,110 +246,52 @@ func TestWriteTestOutputAsNUnitWithFailuresAndErrors(t *testing.T) { Executed: "true", Result: "Failed", TestCases: []NUnitTestCase{ - { - Failure: nil, - Name: testCaseSuccessDisplayName, - Description: fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseSuccessDisplayName), - Success: "true", - Executed: "true", - Asserts: "0", - Result: "Success", - }, - { - Failure: &NUnitFailure{ - Message: "Failed", - StackTrace: failureContent, - }, - Name: testCaseFailureDisplayName, - Description: fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseFailureDisplayName), - Success: "false", - Executed: "true", - Asserts: "0", - Result: "Failed", - 
}, - { - Failure: &NUnitFailure{ - Message: "Failed", - StackTrace: failureErrorContent, - }, - Name: testCaseErrorDisplayName, - Description: fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseErrorDisplayName), - Success: "false", - Executed: "false", - Asserts: "0", - Result: "Failed", - }, + createNUnitTestCase( + testCaseSuccessDisplayName, + fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseSuccessDisplayName), + "", + true, + ), + createNUnitTestCase( + testCaseFailureDisplayName, + fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseFailureDisplayName), + failureContent, + true, + ), + createNUnitTestCase( + testCaseErrorDisplayName, + fmt.Sprintf("%s.%s", testSuiteDisplayName, testCaseErrorDisplayName), + failureErrorContent, + false, + ), }, }, }, - Name: "Helm-Unittest", + Name: TestFramework, Total: 3, Errors: 1, Failures: 1, } + assertionResults := []*AssertionResult{ + createAssertionResult(0, false, false, assertionType, assertionFailure, ""), + } + given := []*TestSuiteResult{ { DisplayName: testSuiteDisplayName, FilePath: outputFile, Passed: false, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseSuccessDisplayName, - Passed: true, - }, - { - DisplayName: testCaseFailureDisplayName, - Passed: false, - AssertsResult: []*AssertionResult{ - { - Index: 0, - FailInfo: []string{ - assertionFailure, - }, - Passed: false, - AssertType: assertionType, - Not: false, - }, - }, - }, - { - DisplayName: testCaseErrorDisplayName, - Passed: false, - ExecError: fmt.Errorf("%s", errorMessage), - AssertsResult: []*AssertionResult{ - { - Index: 0, - FailInfo: []string{ - assertionFailure, - }, - Passed: false, - AssertType: assertionType, - Not: false, - }, - }, - }, + createTestJobResult(testCaseSuccessDisplayName, "", true, nil), + createTestJobResult(testCaseFailureDisplayName, "", false, assertionResults), + createTestJobResult(testCaseErrorDisplayName, errorMessage, false, assertionResults), }, }, } - writer, cerr := os.Create(outputFile) - 
assert.Nil(cerr) - - // Test the formatter sut := NewNUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. - testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual NUnitTestResults xml.Unmarshal(bytevalue, &actual) @@ -343,9 +300,6 @@ func TestWriteTestOutputAsNUnitWithFailuresAndErrors(t *testing.T) { assert.Equal(expected.Errors, actual.Errors) assert.Equal(expected.Failures, actual.Failures) validateNUnitTestSuite(assert, expected.TestSuite, actual.TestSuite) - - testResult.Close() - os.Remove(outputFile) } func TestWriteTestOutputAsNUnitWithErrors(t *testing.T) { @@ -371,7 +325,7 @@ func TestWriteTestOutputAsNUnitWithErrors(t *testing.T) { }, }, }, - Name: "Helm-Unittest", + Name: TestFramework, Total: 1, Errors: 1, Failures: 0, @@ -386,23 +340,8 @@ func TestWriteTestOutputAsNUnitWithErrors(t *testing.T) { }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewNUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. 
- testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual NUnitTestResults xml.Unmarshal(bytevalue, &actual) @@ -411,60 +350,4 @@ func TestWriteTestOutputAsNUnitWithErrors(t *testing.T) { assert.Equal(expected.Errors, actual.Errors) assert.Equal(expected.Failures, actual.Failures) validateNUnitTestSuite(assert, expected.TestSuite, actual.TestSuite) - - testResult.Close() - os.Remove(outputFile) -} - -func validateNUnitTestSuite(assert *assert.Assertions, expected, actual []NUnitTestSuite) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Name, actual[i].Name) - assert.Equal(expected[i].Description, actual[i].Description) - assert.Equal(expected[i].Success, actual[i].Success) - assert.Equal(expected[i].Executed, actual[i].Executed) - assert.Equal(expected[i].Result, actual[i].Result) - - // Validate the testcases - validatNUnitTestCase(assert, expected[i].TestCases, actual[i].TestCases) - - // Recursive validation loop. - validateNUnitTestSuite(assert, expected[i].TestSuites, actual[i].TestSuites) - } - } else { - // Verify if both are nil, otherwise it's still a failure. 
- assert.True(expected == nil && actual == nil) - } -} - -func validatNUnitTestCase(assert *assert.Assertions, expected, actual []NUnitTestCase) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Name, actual[i].Name) - assert.Equal(expected[i].Description, actual[i].Description) - assert.Equal(expected[i].Success, actual[i].Success) - assert.Equal(expected[i].Asserts, actual[i].Asserts) - assert.Equal(expected[i].Executed, actual[i].Executed) - assert.Equal(expected[i].Result, actual[i].Result) - - if expected[i].Failure != nil || actual[i].Failure != nil { - assert.Equal(expected[i].Failure.Message, actual[i].Failure.Message) - assert.Equal(expected[i].Failure.StackTrace, actual[i].Failure.StackTrace) - } else { - // Verify if both are nil, otherwise it's still a failure. - assert.True(expected[i].Failure == nil && actual[i].Failure == nil) - } - } - } else { - // Verify if both are nil, otherwise it's still a failure. 
- assert.True(expected == nil && actual == nil) - } } diff --git a/unittest/test_job_test.go b/unittest/test_job_test.go index 9c9e8e4c..e8ba177a 100644 --- a/unittest/test_job_test.go +++ b/unittest/test_job_test.go @@ -61,7 +61,7 @@ asserts: } func TestV2RunJobOk(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) manifest := ` it: should work asserts: @@ -90,7 +90,7 @@ asserts: } func TestV2RunJobWithNOTESTemplateOk(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) manifest := ` it: should work template: NOTES.txt @@ -118,7 +118,7 @@ asserts: } func TestV2RunJobWithTestJobTemplateOk(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) manifest := ` it: should work template: deployment.yaml @@ -145,7 +145,7 @@ asserts: } func TestV2RunJobWithAssertionFail(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) manifest := ` it: should work asserts: @@ -175,7 +175,7 @@ asserts: } func TestV2RunJobWithValueSet(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) manifest := ` it: should work set: @@ -201,7 +201,7 @@ asserts: } func TestV2RunJobWithValuesFile(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) manifest := ` it: should work values: @@ -230,7 +230,7 @@ asserts: } func TestV2RunJobWithReleaseSettings(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) manifest := ` it: should work release: @@ -257,7 +257,7 @@ asserts: } func TestV3RunJobOk(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) manifest := ` it: should work asserts: @@ -286,7 +286,7 @@ asserts: } func TestV3RunJobWithTestJobTemplateOk(t *testing.T) { - c, _ := 
loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) manifest := ` it: should work template: deployment.yaml @@ -313,7 +313,7 @@ asserts: } func TestV3RunJobWithAssertionFail(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) manifest := ` it: should work asserts: @@ -343,7 +343,7 @@ asserts: } func TestV3RunJobWithValueSet(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) manifest := ` it: should work set: @@ -369,7 +369,7 @@ asserts: } func TestV3RunJobWithValuesFile(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) manifest := ` it: should work values: @@ -398,7 +398,7 @@ asserts: } func TestV3RunJobWithReleaseSettings(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) manifest := ` it: should work release: diff --git a/unittest/test_runner_test.go b/unittest/test_runner_test.go index f58c62f4..b78df4bb 100644 --- a/unittest/test_runner_test.go +++ b/unittest/test_runner_test.go @@ -61,10 +61,10 @@ func TestV2RunnerOkWithPassedTests(t *testing.T) { runner := TestRunner{ Printer: NewPrinter(buffer, nil), Config: TestConfig{ - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV2([]string{"../__fixtures__/v2/basic"}) + passed := runner.RunV2([]string{testV2BasicChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) 
} @@ -74,10 +74,10 @@ func TestV2RunnerOkWithFailedTests(t *testing.T) { runner := TestRunner{ Printer: NewPrinter(buffer, nil), Config: TestConfig{ - TestFiles: []string{"tests_failed/*_test.yaml"}, + TestFiles: []string{testTestFailedFiles}, }, } - passed := runner.RunV2([]string{"../__fixtures__/v2/basic"}) + passed := runner.RunV2([]string{testV2BasicChart}) assert.False(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) } @@ -87,10 +87,10 @@ func TestV2RunnerOkWithSubSubfolder(t *testing.T) { runner := TestRunner{ Printer: NewPrinter(buffer, nil), Config: TestConfig{ - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV2([]string{"../__fixtures__/v2/with-subfolder"}) + passed := runner.RunV2([]string{testV2WithSubFolderChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) } @@ -101,10 +101,10 @@ func TestV2RunnerWithTestsInSubchart(t *testing.T) { Printer: NewPrinter(buffer, nil), Config: TestConfig{ WithSubChart: true, - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV2([]string{"../__fixtures__/v2/with-subchart"}) + passed := runner.RunV2([]string{testV2WithSubChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) } @@ -115,10 +115,10 @@ func TestV2RunnerWithTestsInSubchartButFlagFalse(t *testing.T) { Printer: NewPrinter(buffer, nil), Config: TestConfig{ WithSubChart: false, - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV2([]string{"../__fixtures__/v2/with-subchart"}) + passed := runner.RunV2([]string{testV2WithSubChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) 
} @@ -128,10 +128,10 @@ func TestV3RunnerOkWithPassedTests(t *testing.T) { runner := TestRunner{ Printer: NewPrinter(buffer, nil), Config: TestConfig{ - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV3([]string{"../__fixtures__/v3/basic"}) + passed := runner.RunV3([]string{testV3BasicChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) } @@ -141,10 +141,10 @@ func TestV3RunnerOkWithFailedTests(t *testing.T) { runner := TestRunner{ Printer: NewPrinter(buffer, nil), Config: TestConfig{ - TestFiles: []string{"tests_failed/*_test.yaml"}, + TestFiles: []string{testTestFailedFiles}, }, } - passed := runner.RunV3([]string{"../__fixtures__/v3/basic"}) + passed := runner.RunV3([]string{testV3BasicChart}) assert.False(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) } @@ -154,10 +154,10 @@ func TestV3RunnerOkWithSubSubfolder(t *testing.T) { runner := TestRunner{ Printer: NewPrinter(buffer, nil), Config: TestConfig{ - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV3([]string{"../__fixtures__/v3/with-subfolder"}) + passed := runner.RunV3([]string{testV3WithSubFolderChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) } @@ -168,10 +168,10 @@ func TestV3RunnerWithTestsInSubchart(t *testing.T) { Printer: NewPrinter(buffer, nil), Config: TestConfig{ WithSubChart: true, - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV3([]string{"../__fixtures__/v3/with-subchart"}) + passed := runner.RunV3([]string{testV3WithSubChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) 
} @@ -182,10 +182,10 @@ func TestV3RunnerWithTestsInSubchartButFlagFalse(t *testing.T) { Printer: NewPrinter(buffer, nil), Config: TestConfig{ WithSubChart: false, - TestFiles: []string{"tests/*_test.yaml"}, + TestFiles: []string{testTestFiles}, }, } - passed := runner.RunV3([]string{"../__fixtures__/v3/with-subchart"}) + passed := runner.RunV3([]string{testV3WithSubChart}) assert.True(t, passed, buffer.String()) cupaloy.SnapshotT(t, makeOutputSnapshotable(buffer.String())...) } diff --git a/unittest/test_suite_test.go b/unittest/test_suite_test.go index 0138b60f..761182ea 100644 --- a/unittest/test_suite_test.go +++ b/unittest/test_suite_test.go @@ -15,7 +15,7 @@ import ( v2util "k8s.io/helm/pkg/chartutil" ) -var tmpdir, _ = ioutil.TempDir("", "_suite_tests") +var tmpdir, _ = ioutil.TempDir("", testSuiteTests) func makeTestSuiteResultSnapshotable(result *TestSuiteResult) *TestSuiteResult { @@ -70,7 +70,7 @@ func TestV2ParseTestSuiteFileInSubfolderOk(t *testing.T) { } func TestV2RunSuiteWithMultipleTemplatesWhenPass(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) suiteDoc := ` suite: validate metadata templates: @@ -102,14 +102,14 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v2_multple_template_test.yaml"), false) suiteResult := testSuite.RunV2(c, cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, true, "validate metadata", 1, 4, 4, 0, 0) } func TestV2RunSuiteWhenPass(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) suiteDoc := ` suite: test suite name templates: @@ -125,14 +125,14 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + 
cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v2_suite_test.yaml"), false) suiteResult := testSuite.RunV2(c, cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, true, "test suite name", 1, 2, 2, 0, 0) } func TestV2RunSuiteWhenFail(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/basic") + c, _ := v2util.Load(testV2BasicChart) suiteDoc := ` suite: test suite name templates: @@ -147,14 +147,14 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v2_failed_suite_test.yaml"), false) suiteResult := testSuite.RunV2(c, cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, false, "test suite name", 1, 0, 0, 0, 0) } func TestV2RunSuiteWithSubfolderWhenPass(t *testing.T) { - c, _ := v2util.Load("../__fixtures__/v2/with-subfolder") + c, _ := v2util.Load(testV2WithSubFolderChart) suiteDoc := ` suite: test suite name templates: @@ -171,7 +171,7 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v2_subfolder_test.yaml"), false) suiteResult := testSuite.RunV2(c, cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, true, "test suite name", 1, 2, 2, 0, 0) @@ -188,7 +188,7 @@ func TestV3ParseTestSuiteFileOk(t *testing.T) { } func TestV3RunSuiteWithMultipleTemplatesWhenPass(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) suiteDoc := ` suite: validate metadata templates: @@ -220,14 +220,14 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + cache, _ := 
snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v3_multiple_template_test.yaml"), false) suiteResult := testSuite.RunV3(c, cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, true, "validate metadata", 1, 4, 4, 0, 0) } func TestV3RunSuiteWhenPass(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) suiteDoc := ` suite: test suite name templates: @@ -243,14 +243,14 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v3_suite_test.yaml"), false) suiteResult := testSuite.RunV3(c, cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, true, "test suite name", 1, 2, 2, 0, 0) } func TestV3RunSuiteWhenFail(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/basic") + c, _ := loader.Load(testV3BasicChart) suiteDoc := ` suite: test suite name templates: @@ -265,14 +265,14 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v3_failed_suite_test.yaml"), false) suiteResult := testSuite.RunV3(c, cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, false, "test suite name", 1, 0, 0, 0, 0) } func TestV3RunSuiteWithSubfolderWhenPass(t *testing.T) { - c, _ := loader.Load("../__fixtures__/v3/with-subfolder") + c, _ := loader.Load(testV3WithSubFolderChart) suiteDoc := ` suite: test suite name templates: @@ -289,7 +289,7 @@ tests: testSuite := TestSuite{} yaml.Unmarshal([]byte(suiteDoc), &testSuite) - cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "my_test.yaml"), false) + cache, _ := snapshot.CreateSnapshotOfSuite(path.Join(tmpdir, "v3_subfolder_test.yaml"), false) suiteResult := testSuite.RunV3(c, 
cache, &TestSuiteResult{}) validateTestResultAndSnapshots(t, suiteResult, true, "test suite name", 1, 2, 2, 0, 0) diff --git a/unittest/utils.go b/unittest/utils.go index 3041e936..1c75105f 100644 --- a/unittest/utils.go +++ b/unittest/utils.go @@ -7,6 +7,9 @@ import ( "time" ) +// TestFramework the default name of the test framework. +const TestFramework = "helm-unittest" + func spliteChartRoutes(routePath string) []string { splited := strings.Split(routePath, string(filepath.Separator)) routes := make([]string, len(splited)/2+1) diff --git a/unittest/utils_test.go b/unittest/utils_test.go new file mode 100644 index 00000000..68d0b32c --- /dev/null +++ b/unittest/utils_test.go @@ -0,0 +1,77 @@ +package unittest_test + +import ( + "fmt" + "io/ioutil" + "os" + + . "github.com/lrills/helm-unittest/unittest" + "github.com/stretchr/testify/assert" +) + +// Most used test files +const testSuiteTests string = "_suite_tests" +const testOutputFile string = "../__fixtures__/output/test_output.xml" + +const testTestFiles string = "tests/*_test.yaml" +const testTestFailedFiles string = "tests_failed/*_test.yaml" + +const testV2BasicChart string = "../__fixtures__/v2/basic" +const testV2WithSubChart string = "../__fixtures__/v2/with-subchart" +const testV2WithSubFolderChart string = "../__fixtures__/v2/with-subfolder" +const testV3BasicChart string = "../__fixtures__/v3/basic" +const testV3WithSubChart string = "../__fixtures__/v3/with-subchart" +const testV3WithSubFolderChart string = "../__fixtures__/v3/with-subfolder" + +func createTestJobResult(name, errorMessage string, passed bool, assertionResults []*AssertionResult) *TestJobResult { + testJobResult := &TestJobResult{ + DisplayName: name, + Passed: passed, + } + + if len(errorMessage) > 0 { + testJobResult.ExecError = fmt.Errorf("%s", errorMessage) + } + + if assertionResults != nil { + testJobResult.AssertsResult = assertionResults + } + + return testJobResult +} + +func createAssertionResult(index int, passed, not 
bool, assertionType, failInfo, customInfo string) *AssertionResult { + return &AssertionResult{ + Index: index, + FailInfo: []string{failInfo}, + Passed: passed, + AssertType: assertionType, + Not: not, + CustomInfo: customInfo, + } +} + +func loadFormatterTestcase(assert *assert.Assertions, outputFile string, given []*TestSuiteResult, sut Formatter) []byte { + + writer, cerr := os.Create(outputFile) + assert.Nil(cerr) + + // Test the formatter + serr := sut.WriteTestOutput(given, false, writer) + assert.Nil(serr) + + // Don't defer, as we want to close it before stopping the test. + writer.Close() + + assert.FileExists(outputFile) + + // Unmarshall and validate the output with expected. + testResult, rerr := os.Open(outputFile) + assert.Nil(rerr) + bytevalue, _ := ioutil.ReadAll(testResult) + + testResult.Close() + os.Remove(outputFile) + + return bytevalue +} diff --git a/unittest/validators/common_test.go b/unittest/validators/common_test.go index a9810289..4bbefe99 100644 --- a/unittest/validators/common_test.go +++ b/unittest/validators/common_test.go @@ -21,3 +21,12 @@ func (m *mockSnapshotComparer) CompareToSnapshot(content interface{}) *snapshot. 
args := m.Called(content) return args.Get(0).(*snapshot.CompareResult) } + +// Most used test files +const testOutputFile string = "../__fixtures__/output/test_output.xml" +const testV2BasicChart string = "../__fixtures__/v2/basic" +const testV2WithSubChart string = "../__fixtures__/v2/with-subchart" +const testV2WithSubFolderChart string = "../__fixtures__/v2/with-subfolder" +const testV3BasicChart string = "../__fixtures__/v3/basic" +const testV3WithSubChart string = "../__fixtures__/v3/with-subchart" +const testV3WithSubFolderChart string = "../__fixtures__/v3/with-subfolder" diff --git a/unittest/valueutils/valueutils.go b/unittest/valueutils/valueutils.go index f35f6b0e..61c105a3 100644 --- a/unittest/valueutils/valueutils.go +++ b/unittest/valueutils/valueutils.go @@ -77,11 +77,11 @@ type fetchTraverser struct { } func (tr *fetchTraverser) traverseMapKey(key string) error { - if d, ok := tr.data.(map[interface{}]interface{}); ok { - tr.data = d[key] + if dmap, ok := tr.data.(map[interface{}]interface{}); ok { + tr.data = dmap[key] return nil - } else if d, ok := tr.data.(common.K8sManifest); ok { - tr.data = d[key] + } else if dman, ok := tr.data.(common.K8sManifest); ok { + tr.data = dman[key] return nil } return fmt.Errorf( diff --git a/unittest/xunit_report_xml.go b/unittest/xunit_report_xml.go index 90b062f0..7bcc3ce9 100644 --- a/unittest/xunit_report_xml.go +++ b/unittest/xunit_report_xml.go @@ -10,6 +10,9 @@ import ( "time" ) +// XUnitValidationMethod the default name for Helm XUnit validation. +const XUnitValidationMethod string = "Helm-Validation" + // XUnitAssemblies the top level of the document. 
type XUnitAssemblies struct { XMLName xml.Name `xml:"assemblies"` @@ -126,7 +129,7 @@ func (x *xUnitReportXML) WriteTestOutput(testSuiteResults []*TestSuiteResult, no ts := XUnitAssembly{ Name: testSuiteResult.FilePath, ConfigFile: testSuiteResult.FilePath, - TestFramework: "helm-unittest", + TestFramework: TestFramework, Environment: fmt.Sprintf("%s.%s-%s", runtime.Version(), runtime.GOOS, runtime.GOARCH), RunDate: formatDate(currentTime), RunTime: formatTime(currentTime), @@ -151,7 +154,7 @@ func (x *xUnitReportXML) WriteTestOutput(testSuiteResults []*TestSuiteResult, no Type: "Error", Name: "Error", Failure: &XUnitFailure{ - ExceptionType: "Helm-Validation-Error", + ExceptionType: fmt.Sprintf("%s-%s", XUnitValidationMethod, "Error"), Message: &XUnitFailureMessage{ Data: "Error", }, @@ -182,7 +185,7 @@ func (x *xUnitReportXML) WriteTestOutput(testSuiteResults []*TestSuiteResult, no testCase := XUnitTestCase{ Name: test.DisplayName, Type: classname, - Method: "Helm-Validation", + Method: XUnitValidationMethod, Time: formatDuration(test.Duration), Result: x.formatResult(test.Passed), Failure: nil, @@ -190,16 +193,8 @@ func (x *xUnitReportXML) WriteTestOutput(testSuiteResults []*TestSuiteResult, no // Write when a test is failed if !test.Passed { - // Update error count - if test.ExecError != nil { - ts.ErrorsTests++ - } else { - ts.FailedTests++ - ts.TestRuns[0].FailedTests++ - } - testCase.Failure = &XUnitFailure{ - ExceptionType: "Helm-Validation", + ExceptionType: XUnitValidationMethod, Message: &XUnitFailureMessage{ Data: "Failed", }, @@ -207,6 +202,15 @@ func (x *xUnitReportXML) WriteTestOutput(testSuiteResults []*TestSuiteResult, no Data: test.stringify(), }, } + + // Update error count and ExceptionType + if test.ExecError != nil { + ts.ErrorsTests++ + testCase.Failure.ExceptionType = fmt.Sprintf("%s-%s", XUnitValidationMethod, "Error") + } else { + ts.FailedTests++ + ts.TestRuns[0].FailedTests++ + } } else { ts.PassedTests++ ts.TestRuns[0].PassedTests++ 
diff --git a/unittest/xunit_report_xml_test.go b/unittest/xunit_report_xml_test.go index 418df83b..afc3bb7c 100644 --- a/unittest/xunit_report_xml_test.go +++ b/unittest/xunit_report_xml_test.go @@ -4,7 +4,6 @@ import ( "encoding/xml" "fmt" "io/ioutil" - "os" "path" "testing" @@ -12,7 +11,108 @@ import ( "github.com/stretchr/testify/assert" ) -var tmpXunitTestDir, _ = ioutil.TempDir("", "_suite_tests") +var tmpXunitTestDir, _ = ioutil.TempDir("", testSuiteTests) + +func createXUnitTestCase(name, description, failureContent string, isError bool) XUnitTestCase { + testCase := XUnitTestCase{ + Name: name, + Type: description, + Method: XUnitValidationMethod, + Result: "Pass", + } + + if len(failureContent) > 0 { + testCase.Failure = &XUnitFailure{ + ExceptionType: XUnitValidationMethod, + Message: &XUnitFailureMessage{ + Data: "Failed", + }, + StackTrace: &XUnitFailureStackTrace{ + Data: failureContent, + }, + } + testCase.Result = "Fail" + } + + if isError { + testCase.Failure.ExceptionType = fmt.Sprintf("%s-%s", XUnitValidationMethod, "Error") + } + + return testCase +} + +func assertXUnitTestAssemblies(assert *assert.Assertions, expected, actual []XUnitAssembly) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Name, actual[i].Name) + assert.Equal(expected[i].ConfigFile, actual[i].ConfigFile) + assert.Equal(expected[i].TotalTests, actual[i].TotalTests) + assert.Equal(expected[i].PassedTests, actual[i].PassedTests) + assert.Equal(expected[i].FailedTests, actual[i].FailedTests) + assert.Equal(expected[i].SkippedTests, actual[i].SkippedTests) + assert.Equal(expected[i].ErrorsTests, actual[i].ErrorsTests) + + // Validate the tesruns + assertXUnitTestRun(assert, expected[i].TestRuns, actual[i].TestRuns) + } + } else { + // Verify if both are nil, otherwise it's still a failure. 
+ assert.True(expected == nil && actual == nil) + } +} + +func assertXUnitTestRun(assert *assert.Assertions, expected, actual []XUnitTestRun) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Name, actual[i].Name) + assert.Equal(expected[i].TotalTests, actual[i].TotalTests) + assert.Equal(expected[i].PassedTests, actual[i].PassedTests) + assert.Equal(expected[i].FailedTests, actual[i].FailedTests) + assert.Equal(expected[i].SkippedTests, actual[i].SkippedTests) + + // Validate the testcases + assertXUnitTestCase(assert, expected[i].TestCases, actual[i].TestCases) + } + } else { + // Verify if both are nil, otherwise it's still a failure. + assert.True(expected == nil && actual == nil) + } +} + +func assertXUnitTestCase(assert *assert.Assertions, expected, actual []XUnitTestCase) { + + if expected != nil && actual != nil { + actualLength := len(actual) + assert.Equal(len(expected), actualLength) + + for i := 0; i < actualLength; i++ { + assert.Equal(expected[i].Name, actual[i].Name) + assert.Equal(expected[i].Type, actual[i].Type) + assert.Equal(expected[i].Method, actual[i].Method) + assert.Equal(expected[i].Result, actual[i].Result) + + if expected[i].Failure != nil || actual[i].Failure != nil { + assert.Equal(expected[i].Failure.ExceptionType, actual[i].Failure.ExceptionType) + assert.Equal(expected[i].Failure.Message.Data, actual[i].Failure.Message.Data) + assert.Equal(expected[i].Failure.StackTrace.Data, actual[i].Failure.StackTrace.Data) + } else { + // Verify if both are nil, otherwise it's still a failure. + assert.True(expected[i].Failure == nil && actual[i].Failure == nil) + } + } + } else { + // Verify if both are nil, otherwise it's still a failure. 
+ assert.True(expected == nil && actual == nil) + } +} func TestWriteTestOutputAsXUnitMinimalSuccess(t *testing.T) { assert := assert.New(t) @@ -43,13 +143,7 @@ func TestWriteTestOutputAsXUnitMinimalSuccess(t *testing.T) { FailedTests: totalFailed, SkippedTests: totalSkipped, TestCases: []XUnitTestCase{ - { - Name: testCaseDisplayName, - Type: testSuiteDisplayName, - Method: "Helm-Validation", - Result: "Pass", - Failure: nil, - }, + createXUnitTestCase(testCaseDisplayName, testSuiteDisplayName, "", false), }, }, }, @@ -63,39 +157,18 @@ func TestWriteTestOutputAsXUnitMinimalSuccess(t *testing.T) { FilePath: outputFile, Passed: true, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseDisplayName, - Passed: true, - }, + createTestJobResult(testCaseDisplayName, "", true, nil), }, }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewXUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. 
- testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual XUnitAssemblies xml.Unmarshal(bytevalue, &actual) assertXUnitTestAssemblies(assert, expected.Assembly, actual.Assembly) - - testResult.Close() - os.Remove(outputFile) } func TestWriteTestOutputAsXUnitWithFailures(t *testing.T) { @@ -132,28 +205,8 @@ func TestWriteTestOutputAsXUnitWithFailures(t *testing.T) { FailedTests: totalFailed, SkippedTests: totalSkipped, TestCases: []XUnitTestCase{ - { - Name: testCaseSuccessDisplayName, - Type: testSuiteDisplayName, - Method: "Helm-Validation", - Result: "Pass", - Failure: nil, - }, - { - Name: testCaseFailureDisplayName, - Type: testSuiteDisplayName, - Method: "Helm-Validation", - Result: "Fail", - Failure: &XUnitFailure{ - ExceptionType: "Helm-Validation", - Message: &XUnitFailureMessage{ - Data: "Failed", - }, - StackTrace: &XUnitFailureStackTrace{ - Data: failureContent, - }, - }, - }, + createXUnitTestCase(testCaseSuccessDisplayName, testSuiteDisplayName, "", false), + createXUnitTestCase(testCaseFailureDisplayName, testSuiteDisplayName, failureContent, false), }, }, }, @@ -161,60 +214,29 @@ func TestWriteTestOutputAsXUnitWithFailures(t *testing.T) { }, } + assertionResults := []*AssertionResult{ + createAssertionResult(0, false, false, assertionType, assertionFailure, ""), + } + given := []*TestSuiteResult{ { DisplayName: testSuiteDisplayName, FilePath: outputFile, Passed: false, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseSuccessDisplayName, - Passed: true, - }, - { - DisplayName: testCaseFailureDisplayName, - Passed: false, - AssertsResult: []*AssertionResult{ - { - Index: 0, - FailInfo: []string{ - assertionFailure, - }, - Passed: false, - AssertType: assertionType, - Not: false, - }, - }, - }, + createTestJobResult(testCaseSuccessDisplayName, "", true, nil), + createTestJobResult(testCaseFailureDisplayName, "", false, 
assertionResults), }, }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewXUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. - testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual XUnitAssemblies xml.Unmarshal(bytevalue, &actual) assertXUnitTestAssemblies(assert, expected.Assembly, actual.Assembly) - - testResult.Close() - os.Remove(outputFile) } func TestWriteTestOutputAsXUnitWithFailuresAndErrors(t *testing.T) { @@ -254,43 +276,9 @@ func TestWriteTestOutputAsXUnitWithFailuresAndErrors(t *testing.T) { FailedTests: totalFailed, SkippedTests: totalSkipped, TestCases: []XUnitTestCase{ - { - Name: testCaseSuccessDisplayName, - Type: testSuiteDisplayName, - Method: "Helm-Validation", - Result: "Pass", - Failure: nil, - }, - { - Name: testCaseFailureDisplayName, - Type: testSuiteDisplayName, - Method: "Helm-Validation", - Result: "Fail", - Failure: &XUnitFailure{ - ExceptionType: "Helm-Validation", - Message: &XUnitFailureMessage{ - Data: "Failed", - }, - StackTrace: &XUnitFailureStackTrace{ - Data: failureContent, - }, - }, - }, - { - Name: testCaseErrorDisplayName, - Type: testSuiteDisplayName, - Method: "Helm-Validation", - Result: "Fail", - Failure: &XUnitFailure{ - ExceptionType: "Helm-Validation", - Message: &XUnitFailureMessage{ - Data: "Failed", - }, - StackTrace: &XUnitFailureStackTrace{ - Data: failureErrorContent, - }, - }, - }, + createXUnitTestCase(testCaseSuccessDisplayName, testSuiteDisplayName, "", false), + createXUnitTestCase(testCaseFailureDisplayName, testSuiteDisplayName, failureContent, false), + createXUnitTestCase(testCaseErrorDisplayName, testSuiteDisplayName, 
failureErrorContent, true), }, }, }, @@ -298,76 +286,30 @@ func TestWriteTestOutputAsXUnitWithFailuresAndErrors(t *testing.T) { }, } + assertionResults := []*AssertionResult{ + createAssertionResult(0, false, false, assertionType, assertionFailure, ""), + } + given := []*TestSuiteResult{ { DisplayName: testSuiteDisplayName, FilePath: outputFile, Passed: false, TestsResult: []*TestJobResult{ - { - DisplayName: testCaseSuccessDisplayName, - Passed: true, - }, - { - DisplayName: testCaseFailureDisplayName, - Passed: false, - AssertsResult: []*AssertionResult{ - { - Index: 0, - FailInfo: []string{ - assertionFailure, - }, - Passed: false, - AssertType: assertionType, - Not: false, - }, - }, - }, - { - DisplayName: testCaseErrorDisplayName, - Passed: false, - AssertsResult: []*AssertionResult{ - { - Index: 0, - FailInfo: []string{ - assertionFailure, - }, - Passed: false, - AssertType: assertionType, - Not: false, - }, - }, - ExecError: fmt.Errorf("%s", errorMessage), - }, + createTestJobResult(testCaseSuccessDisplayName, "", true, nil), + createTestJobResult(testCaseFailureDisplayName, "", false, assertionResults), + createTestJobResult(testCaseErrorDisplayName, errorMessage, false, assertionResults), }, }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewXUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. 
- testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual XUnitAssemblies xml.Unmarshal(bytevalue, &actual) assertXUnitTestAssemblies(assert, expected.Assembly, actual.Assembly) - - testResult.Close() - os.Remove(outputFile) } func TestWriteTestOutputAsXUnitWithErrors(t *testing.T) { @@ -396,7 +338,7 @@ func TestWriteTestOutputAsXUnitWithErrors(t *testing.T) { Type: "Error", Name: "Error", Failure: &XUnitFailure{ - ExceptionType: "Helm-Validation-Error", + ExceptionType: fmt.Sprintf("%s-%s", XUnitValidationMethod, "Error"), Message: &XUnitFailureMessage{ Data: "Failed", }, @@ -419,102 +361,11 @@ func TestWriteTestOutputAsXUnitWithErrors(t *testing.T) { }, } - writer, cerr := os.Create(outputFile) - assert.Nil(cerr) - - // Test the formatter sut := NewXUnitReportXML() - serr := sut.WriteTestOutput(given, false, writer) - assert.Nil(serr) - - // Don't defer, as we want to close it before stopping the test. - writer.Close() - - assert.FileExists(outputFile) - - // Unmarshall and validate the output with expected. 
- testResult, rerr := os.Open(outputFile) - assert.Nil(rerr) - bytevalue, _ := ioutil.ReadAll(testResult) + bytevalue := loadFormatterTestcase(assert, outputFile, given, sut) var actual XUnitAssemblies xml.Unmarshal(bytevalue, &actual) assertXUnitTestAssemblies(assert, expected.Assembly, actual.Assembly) - - testResult.Close() - os.Remove(outputFile) -} - -func assertXUnitTestAssemblies(assert *assert.Assertions, expected, actual []XUnitAssembly) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Name, actual[i].Name) - assert.Equal(expected[i].ConfigFile, actual[i].ConfigFile) - assert.Equal(expected[i].TotalTests, actual[i].TotalTests) - assert.Equal(expected[i].PassedTests, actual[i].PassedTests) - assert.Equal(expected[i].FailedTests, actual[i].FailedTests) - assert.Equal(expected[i].SkippedTests, actual[i].SkippedTests) - assert.Equal(expected[i].ErrorsTests, actual[i].ErrorsTests) - - // Validate the tesruns - assertXUnitTestRun(assert, expected[i].TestRuns, actual[i].TestRuns) - } - } else { - // Verify if both are nil, otherwise it's still a failure. - assert.True(expected == nil && actual == nil) - } -} - -func assertXUnitTestRun(assert *assert.Assertions, expected, actual []XUnitTestRun) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Name, actual[i].Name) - assert.Equal(expected[i].TotalTests, actual[i].TotalTests) - assert.Equal(expected[i].PassedTests, actual[i].PassedTests) - assert.Equal(expected[i].FailedTests, actual[i].FailedTests) - assert.Equal(expected[i].SkippedTests, actual[i].SkippedTests) - - // Validate the testcases - assertXUnitTestCase(assert, expected[i].TestCases, actual[i].TestCases) - } - } else { - // Verify if both are nil, otherwise it's still a failure. 
- assert.True(expected == nil && actual == nil) - } -} - -func assertXUnitTestCase(assert *assert.Assertions, expected, actual []XUnitTestCase) { - - if expected != nil && actual != nil { - actualLength := len(actual) - assert.Equal(len(expected), actualLength) - - for i := 0; i < actualLength; i++ { - assert.Equal(expected[i].Name, actual[i].Name) - assert.Equal(expected[i].Type, actual[i].Type) - assert.Equal(expected[i].Method, actual[i].Method) - assert.Equal(expected[i].Result, actual[i].Result) - - if expected[i].Failure != nil || actual[i].Failure != nil { - assert.Equal(expected[i].Failure.ExceptionType, actual[i].Failure.ExceptionType) - assert.Equal(expected[i].Failure.Message.Data, actual[i].Failure.Message.Data) - assert.Equal(expected[i].Failure.StackTrace.Data, actual[i].Failure.StackTrace.Data) - } else { - // Verify if both are nil, otherwise it's still a failure. - assert.True(expected[i].Failure == nil && actual[i].Failure == nil) - } - } - } else { - // Verify if both are nil, otherwise it's still a failure. - assert.True(expected == nil && actual == nil) - } }