6 changes: 6 additions & 0 deletions .github/workflows/docs.yml
Contributor
rename the file to test_and_docs, as the workflow now does both

@@ -29,7 +29,12 @@ on:
    types: [checks_requested]

jobs:
  unit-tests:
    uses: ./.github/workflows/tests.yml
    secrets: inherit
  build-docs:
Contributor
add a job to do the docs check; take a look at how it is done in docs-as-code:
https://github.com/eclipse-score/docs-as-code/blob/main/.github/workflows/test_and_docs.yml

    needs: unit-tests
    if: ${{ always() }}
    uses: eclipse-score/cicd-workflows/.github/workflows/docs.yml@main
    permissions:
      contents: write
@@ -41,3 +46,4 @@ jobs:
      # the bazel-target depends on your repo specific docs_targets configuration (e.g. "suffix")
      bazel-target: "//:docs -- --github_user=${{ github.repository_owner }} --github_repo=${{ github.event.repository.name }}"
      retention-days: 3
      tests-report-artifact: bazel-testlogs
6 changes: 4 additions & 2 deletions .github/workflows/tests.yml
@@ -13,10 +13,11 @@

name: Tests
on:
  pull_request_target:
  pull_request:
    types: [opened, reopened, synchronize]
  merge_group:
    types: [checks_requested]
  workflow_call:
jobs:
  test:
    name: "Run tests"
@@ -25,4 +26,5 @@ jobs:
    permissions:
      contents: read
      pull-requests: read
    with:
      bazel-target: 'test //src/...'
      bazel-target: 'test //src/... //tests/...'
      upload-name: 'bazel-testlogs'
1 change: 1 addition & 0 deletions BUILD
@@ -54,6 +54,7 @@ use_format_targets()

docs(
    data = [
        "@score_platform//:needs_json",
        "@score_process//:needs_json",  # This allows linking to requirements (wp__requirements_comp, etc.) from the process_description repository.
    ],
    source_dir = "docs",
8 changes: 7 additions & 1 deletion MODULE.bazel
@@ -28,10 +28,16 @@ bazel_dep(name = "googletest", version = "1.17.0.bcr.1")

Is there a reason not to upgrade further? Platform & Tooling, for example, are also not on the newest version.

# S-CORE process rules
bazel_dep(name = "score_bazel_platforms", version = "0.0.4")
bazel_dep(name = "score_docs_as_code", version = "2.3.0")
bazel_dep(name = "score_docs_as_code", version = "2.3.3")
bazel_dep(name = "score_tooling", version = "1.1.0")
bazel_dep(name = "score_rust_policies", version = "0.0.3")

# Temporary workaround until everything is migrated to docs_as_code v3
single_version_override(
    module_name = "score_docs_as_code",
    version = "2.3.3",
)

Comment on lines +35 to +40


Not having the required properties inside your tests will result in errors / crashes in future versions, if I remember correctly. For the 2.3.3 you are using here I think this is still fine, but you will not have any links. The links are only generated through the 'PartiallyVerifies' & 'FullyVerifies' attributes. If those are not on your tests we can't link them, just so you are aware.
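
To make this concrete, a minimal sketch of a test that records such verification links, using the attribute names from the comment above; the requirement ID comp_req__lifecycle__example and the test itself are hypothetical placeholders, not part of this PR:

#include <gtest/gtest.h>

// Minimal sketch (hypothetical requirement ID): records the properties that
// docs-as-code reads to link a test case to requirements.
TEST(RequirementLinkExample, RecordsVerificationAttributes)
{
    RecordProperty("TestType", "requirements-based");
    RecordProperty("DerivationTechnique", "requirements-analysis");
    // These two attributes are what the docs build uses to generate the links:
    RecordProperty("PartiallyVerifies", "comp_req__lifecycle__example");
    RecordProperty("Description", "Example of linking a test case to a requirement.");
    SUCCEED();  // placeholder for the real assertions
}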

bazel_dep(name = "score_process", version = "1.4.0", dev_dependency = True)
bazel_dep(name = "score_platform", version = "0.5.1", dev_dependency = True)

Expand Down
54 changes: 2 additions & 52 deletions MODULE.bazel.lock

Large diffs are not rendered by default.

48 changes: 48 additions & 0 deletions docs/conf.py
@@ -16,6 +16,11 @@
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import os
from pathlib import Path

from docutils import nodes
from docutils.parsers.rst import Directive

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
@@ -54,3 +59,46 @@

# Enable numref
numfig = True


class DisplayTestLogs(Directive):
    """Find and display the raw content of all test.log files."""

    def run(self):
        env = self.state.document.settings.env
        ws_root = Path(env.app.srcdir).parent

        result_nodes = []
        for dirname in ["bazel-testlogs", "tests-report"]:
            candidate = ws_root / dirname
            if not candidate.is_dir():
                continue
            for root, _, files in sorted(os.walk(candidate)):
Comment on lines +72 to +76
Contributor

Take a look at the rglob method from pathlib; no need to mix it with os: https://docs.python.org/3/library/pathlib.html#pathlib.Path.rglob

You can simplify it into something like

from itertools import chain

for log_file in chain(
    (ws_root / "bazel-testlogs").rglob("test.log"),
    (ws_root / "tests-report").rglob("test.log"),
):
    ...

if "test.log" in files:
log_path = Path(root) / "test.log"
rel_path = log_path.relative_to(ws_root)

title = nodes.rubric(text=str(rel_path))
result_nodes.append(title)

try:
content = log_path.read_text(encoding="utf-8")
except Exception as e:
content = f"Error reading file: {e}"

code = nodes.literal_block(content, content)
code["language"] = "text"
code["source"] = str(rel_path)
result_nodes.append(code)

if not result_nodes:
para = nodes.paragraph(
text="No test.log files found in bazel-testlogs or tests-report."
)
result_nodes.append(para)

return result_nodes


def setup(app):
app.add_directive("display-test-logs", DisplayTestLogs)


This might be really slow at some point depending on how large the logs are and how many of these directives you have.

1 change: 1 addition & 0 deletions docs/index.rst
@@ -21,6 +21,7 @@ Lifecycle
   :titlesonly:

   module/*/index
   statistics.rst

Overview
--------
122 changes: 122 additions & 0 deletions docs/statistics.rst
@@ -0,0 +1,122 @@
.. _statistics:

Component Requirements Statistics
=================================

Overview
--------

.. needpie:: Requirements Status
   :labels: not valid, valid but not tested, valid and tested
   :colors: red, yellow, green

   type == 'comp_req' and status == 'invalid'
   type == 'comp_req' and testlink == '' and (status == 'valid' or status == 'invalid')
   type == 'comp_req' and testlink != '' and (status == 'valid' or status == 'invalid')

In Detail
---------

.. grid:: 2
   :class-container: score-grid

   .. grid-item-card::

      .. needpie:: Requirements marked as Valid
         :labels: not valid, valid
         :colors: red, green

         type == 'comp_req' and status == 'invalid'
         type == 'comp_req' and status == 'valid'

   .. grid-item-card::

      .. needpie:: Requirements with Codelinks
         :labels: no codelink, with codelink
         :colors: red, green

         type == 'comp_req' and source_code_link == ''
         type == 'comp_req' and source_code_link != ''

   .. grid-item-card::

      .. needpie:: Test Results
         :labels: passed, failed, skipped
         :colors: green, red, orange

         type == 'testcase' and result == 'passed'
         type == 'testcase' and result == 'failed'
         type == 'testcase' and result == 'skipped'

.. grid:: 2

   .. grid-item-card::

      Failed Tests

      *Hint: This table should be empty. Before a PR can be merged, all tests have to be successful.*

      .. needtable:: FAILED TESTS
         :filter: result == "failed"
         :tags: TEST
         :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link"

   .. grid-item-card::

      Skipped / Disabled Tests

      .. needtable:: SKIPPED/DISABLED TESTS
         :filter: result != "failed" and result != "passed"
         :tags: TEST
         :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link"




All Passed Tests
----------------

.. needtable:: SUCCESSFUL TESTS
   :filter: result == "passed"
   :tags: TEST
   :columns: name as "testcase";result;fully_verifies;partially_verifies;test_type;derivation_technique;id as "link"


Details About Testcases
------------------------

.. needpie:: Test Types Used In Testcases
   :labels: static-code-analysis, structural-statement-coverage, structural-branch-coverage, walkthrough, inspection, interface-test, requirements-based, resource-usage, control-flow-analysis, data-flow-analysis, fault-injection, struct-func-cov, struct-call-cov
   :legend:

   type == 'testcase' and test_type == 'static-code-analysis'
   type == 'testcase' and test_type == 'structural-statement-coverage'
   type == 'testcase' and test_type == 'structural-branch-coverage'
   type == 'testcase' and test_type == 'walkthrough'
   type == 'testcase' and test_type == 'inspection'
   type == 'testcase' and test_type == 'interface-test'
   type == 'testcase' and test_type == 'requirements-based'
   type == 'testcase' and test_type == 'resource-usage'
   type == 'testcase' and test_type == 'control-flow-analysis'
   type == 'testcase' and test_type == 'data-flow-analysis'
   type == 'testcase' and test_type == 'fault-injection'
   type == 'testcase' and test_type == 'struct-func-cov'
   type == 'testcase' and test_type == 'struct-call-cov'


.. needpie:: Derivation Techniques Used In Testcases
   :labels: requirements-analysis, boundary-values, equivalence-classes, fuzz-testing, error-guessing, explorative-testing
   :legend:

   type == 'testcase' and derivation_technique == 'requirements-analysis'
   type == 'testcase' and derivation_technique == 'boundary-values'
   type == 'testcase' and derivation_technique == 'equivalence-classes'
   type == 'testcase' and derivation_technique == 'fuzz-testing'
   type == 'testcase' and derivation_technique == 'error-guessing'
   type == 'testcase' and derivation_technique == 'explorative-testing'


Test Log Files
--------------

.. display-test-logs::
7 changes: 7 additions & 0 deletions src/health_monitoring_lib/cpp/tests/health_monitor_test.cpp
@@ -21,11 +21,18 @@ using ::testing::_;

class HealthMonitorTest : public ::testing::Test
{
protected:
    void SetUp() override
    {
        RecordProperty("TestType", "requirements-based");
Contributor

General comment on all cpp tests:

If it's requirements-based, why is there no mapping to the requirement that is tested by this test case?

        RecordProperty("DerivationTechnique", "requirements-analysis");
    }
};

// For the first review round, only a single test case to show the API
TEST_F(HealthMonitorTest, TestName)
{
    RecordProperty("Description", "This test demonstrates the usage of HealthMonitor and DeadlineMonitor APIs. It creates a HealthMonitor with a DeadlineMonitor, retrieves the DeadlineMonitor, and tests starting a deadline.");
    auto builder_mon = deadline::DeadlineMonitorBuilder()
                           .add_deadline(IdentTag("deadline_1"),
                                         TimeRange(std::chrono::milliseconds(100), std::chrono::milliseconds(200)))
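
Following up on the review comment above, a minimal sketch of what such a mapping could look like; the requirement ID comp_req__health_monitoring__deadline and the test are hypothetical placeholders, not part of this PR:

// Sketch only: each test case could add its own requirement link on top of
// the fixture-wide TestType/DerivationTechnique properties set in SetUp().
TEST_F(HealthMonitorTest, DeadlineStart_LinkedToRequirement)
{
    // Hypothetical requirement ID; the real comp_req ID would come from docs/.
    RecordProperty("PartiallyVerifies", "comp_req__health_monitoring__deadline");
    RecordProperty("Description", "Links deadline start behaviour to its requirement.");
    SUCCEED();  // placeholder for the actual deadline checks
}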
@@ -24,6 +24,8 @@ using score::lcm::ProcessStateReceiver;
class ProcessStateClient_UT : public ::testing::Test {
protected:
    void SetUp() override {
        RecordProperty("TestType", "requirements-based");
        RecordProperty("DerivationTechnique", "requirements-analysis");
        notifier_ = std::make_unique<ProcessStateNotifier>();
        receiver_ = notifier_->constructReceiver();
    }
@@ -37,11 +39,13 @@ class ProcessStateClient_UT : public ::testing::Test {
};

TEST_F(ProcessStateClient_UT, ProcessStateClient_ConstructReceiver_Succeeds) {
    RecordProperty("Description", "This test verifies that the ProcessStateNotifier can successfully construct a ProcessStateReceiver instance.");
    ASSERT_NE(notifier_, nullptr);
    ASSERT_NE(receiver_, nullptr);
}

TEST_F(ProcessStateClient_UT, ProcessStateClient_QueueOneProcess_Succeeds) {
    RecordProperty("Description", "This test verifies that a single PosixProcess can be successfully queued using the ProcessStateNotifier and retrieved using the ProcessStateReceiver.");
    PosixProcess process1{
        .id = score::lcm::IdentifierHash("Process1"),
        .processStateId = score::lcm::ProcessState::kRunning,
@@ -67,6 +71,7 @@ TEST_F(ProcessStateClient_UT, ProcessStateClient_QueueOneProcess_Succeeds) {
}

TEST_F(ProcessStateClient_UT, ProcessStateClient_QueueMaxNumberOfProcesses_Succeeds) {
    RecordProperty("Description", "This test verifies that the ProcessStateNotifier can successfully queue the maximum number of PosixProcess instances defined by the buffer size, and that they can be retrieved using the ProcessStateReceiver.");
    // Queue maximum number of processes
    for (size_t i = 0; i < static_cast<size_t>(BufferConstants::BUFFER_QUEUE_SIZE); ++i) {
        PosixProcess process{
@@ -93,6 +98,7 @@ TEST_F(ProcessStateClient_UT, ProcessStateClient_QueueMaxNumberOfProcesses_Succeeds) {
}

TEST_F(ProcessStateClient_UT, ProcessStateClient_QueueOneProcessTooMany_Fails) {
    RecordProperty("Description", "This test verifies that attempting to queue a PosixProcess when the buffer is already at maximum capacity results in a failure, and that no additional processes can be retrieved from the receiver.");
    PosixProcess process1{
        .id = score::lcm::IdentifierHash("Process1"),
        .processStateId = score::lcm::ProcessState::kRunning,
36 changes: 31 additions & 5 deletions tests/ut/identifier_hash_UT/identifier_hash_UT.cpp
@@ -20,34 +20,56 @@ using std::stringstream;

using score::lcm::IdentifierHash;

TEST(IdentifierHashTest, IdentifierHash_with_string_view_created)
class IdentifierHashTest : public ::testing::Test
{
protected:
    void SetUp() override
    {
        RecordProperty("TestType", "requirements-based");
        RecordProperty("DerivationTechnique", "requirements-analysis");
    }
};

TEST_F(IdentifierHashTest, IdentifierHash_with_string_view_created)
{
    RecordProperty("Description",
                   "This test verifies that an IdentifierHash can be successfully created using a std::string_view, "
                   "and that its string representation can be retrieved correctly.");
    std::string_view idStrView = "ProcessGroup1/Startup";
    IdentifierHash identifierHash(idStrView);
    stringstream strStream;
    strStream << identifierHash;
    ASSERT_EQ(strStream.str(), idStrView);
}

TEST(IdentifierHashTest, IdentifierHash_with_string_created)
TEST_F(IdentifierHashTest, IdentifierHash_with_string_created)
{
    RecordProperty("Description",
                   "This test verifies that an IdentifierHash can be successfully created using a std::string, and "
                   "that its string representation can be retrieved correctly.");
    std::string idStr = "ProcessGroup1/Startup";
    IdentifierHash identifierHash(idStr);
    stringstream strStream;
    strStream << identifierHash;
    ASSERT_EQ(strStream.str(), idStr);
}

TEST(IdentifierHashTest, IdentifierHash_default_created)
TEST_F(IdentifierHashTest, IdentifierHash_default_created)
{
    RecordProperty("Description",
                   "This test verifies that a default-constructed IdentifierHash can be created, and that its string "
                   "representation is empty.");
    IdentifierHash identifierHash;
    stringstream strStream;
    strStream << identifierHash;
    ASSERT_EQ(strStream.str(), "");
}

TEST(IdentifierHashTest, IdentifierHash_invalid_hash_no_string_representation)
TEST_F(IdentifierHashTest, IdentifierHash_invalid_hash_no_string_representation)
{
    RecordProperty("Description",
                   "This test verifies that if an IdentifierHash is created with a string that is not registered in "
                   "the registry, its string representation indicates that it is unknown and includes the hash value.");
    std::string idStr = "MainFG";
    IdentifierHash identifierHash(idStr);

@@ -60,8 +82,12 @@ TEST(IdentifierHashTest, IdentifierHash_invalid_hash_no_string_representation)
    ASSERT_TRUE(strStream.str().find(std::to_string(identifierHash.data())) != std::string::npos);
}

TEST(IdentifierHashTest, IdentifierHash_no_dangling_pointer_after_source_string_dies)
TEST_F(IdentifierHashTest, IdentifierHash_no_dangling_pointer_after_source_string_dies)
{
    RecordProperty("Description",
                   "This test verifies that an IdentifierHash created from a std::string does not have a dangling "
                   "pointer to the original string after it goes out of scope, and that its string representation can "
                   "still be retrieved correctly.");
    std::unique_ptr<IdentifierHash> hash_ptr;
    std::string_view idStrView = "this string will be destroyed";
