diff --git a/github_pages/static/index.md b/github_pages/static/index.md
index e9fbbc8a63..66e2bbbfd1 100644
--- a/github_pages/static/index.md
+++ b/github_pages/static/index.md
@@ -23,6 +23,7 @@ These reports were generated during continuous integration for the most recent P
* [Sequence view](./artifacts/uss_qualifier/reports/f3548_self_contained/sequence)
* [Tested requirements](./artifacts/uss_qualifier/reports/f3548_self_contained/gate3)
+* [Globally-expanded report](./artifacts/uss_qualifier/reports/f3548_self_contained/globally_expanded/report.html)
### [US UTM Implementation test configuration](https://github.com/interuss/monitoring/blob/main/monitoring/uss_qualifier/configurations/dev/utm_implementation_us.yaml)
diff --git a/monitoring/uss_qualifier/common_data_definitions.py b/monitoring/uss_qualifier/common_data_definitions.py
index 8983f1594c..6ce18f2a20 100644
--- a/monitoring/uss_qualifier/common_data_definitions.py
+++ b/monitoring/uss_qualifier/common_data_definitions.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
from enum import Enum
+from typing import List
class Severity(str, Enum):
@@ -94,3 +97,7 @@ def symbol(self) -> str:
Severity.High.value: "🛑",
Severity.Critical.value: "☢",
}.get(self.value, "�")
+
+ @staticmethod
+ def all_values() -> List[Severity]:
+ return [Severity.Low, Severity.Medium, Severity.High, Severity.Critical]
diff --git a/monitoring/uss_qualifier/configurations/configuration.py b/monitoring/uss_qualifier/configurations/configuration.py
index 3afc03498b..5b591ef700 100644
--- a/monitoring/uss_qualifier/configurations/configuration.py
+++ b/monitoring/uss_qualifier/configurations/configuration.py
@@ -213,6 +213,11 @@ class RawReportConfiguration(ImplicitDict):
"""To pretty-print JSON content, specify an indent level (generally 2), or omit or set to None to write compactly."""
+class GloballyExpandedReportConfiguration(ImplicitDict):
+ redact_access_tokens: bool = True
+ """When True, look for instances of "Authorization" keys in the report with values starting "Bearer " and redact the signature from those access tokens"""
+
+
class ArtifactsConfiguration(ImplicitDict):
raw_report: Optional[RawReportConfiguration] = None
"""Configuration for raw report generation"""
@@ -229,6 +234,9 @@ class ArtifactsConfiguration(ImplicitDict):
sequence_view: Optional[SequenceViewConfiguration] = None
"""If specified, configuration describing a desired report describing the sequence of events that occurred during the test"""
+ globally_expanded_report: Optional[GloballyExpandedReportConfiguration] = None
+ """If specified, configuration describing a desired report mimicking what might be seen had the test run been conducted manually."""
+
class USSQualifierConfigurationV1(ImplicitDict):
test_run: Optional[TestConfiguration] = None
diff --git a/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml b/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml
index 9f4b141d8a..5e2753d55a 100644
--- a/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml
+++ b/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml
@@ -403,6 +403,9 @@ v1:
# Write out a human-readable report showing the sequence of events of the test
sequence_view: {}
+    # Write out a long-form report mimicking what might be produced if the test run had been performed manually
+ globally_expanded_report: {}
+
# This block defines whether to return an error code from the execution of uss_qualifier, based on the content of the
# test run report. All of the criteria must be met to return a successful code.
validation:
diff --git a/monitoring/uss_qualifier/make_artifacts.py b/monitoring/uss_qualifier/make_artifacts.py
index 3f6b97d6e8..c0518899c0 100644
--- a/monitoring/uss_qualifier/make_artifacts.py
+++ b/monitoring/uss_qualifier/make_artifacts.py
@@ -7,6 +7,8 @@
from implicitdict import ImplicitDict
from loguru import logger
+from monitoring.monitorlib import inspection
+from monitoring import uss_qualifier as uss_qualifier_module
from monitoring.uss_qualifier.configurations.configuration import (
USSQualifierConfiguration,
USSQualifierConfigurationV1,
@@ -61,6 +63,8 @@ def main() -> int:
else:
config_names = [config_in_report] * len(report_paths)
+ inspection.import_submodules(uss_qualifier_module)
+
for config_name, report_path in zip(config_names, report_paths):
logger.info(
f"========== Generating artifacts for configuration {config_name} and report {report_path} =========="
diff --git a/monitoring/uss_qualifier/reports/README.md b/monitoring/uss_qualifier/reports/README.md
index 316d5da608..e1038b2742 100644
--- a/monitoring/uss_qualifier/reports/README.md
+++ b/monitoring/uss_qualifier/reports/README.md
@@ -21,3 +21,7 @@ The [tested requirements artifact](./tested_requirements) summarizes a test run'
### Sequence view
The [sequence view artifact](./sequence_view) is a human-readable description/log of what happened during a test run. This artifact is a good starting point to understand or debug what happened during a test run.
+
+### Globally-expanded report
+
+The [globally-expanded report artifact](./globally_expanded/README.md) assembles procedural information about the test run into a single, flat presentation, mimicking what might be seen as output had the automated test been performed manually.
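+
+For example, a minimal sketch of enabling this artifact in a test configuration (assuming the usual `v1` / `artifacts` structure; other artifact entries omitted):
+
+```yaml
+v1:
+  artifacts:
+    globally_expanded_report:
+      redact_access_tokens: true
+```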
diff --git a/monitoring/uss_qualifier/reports/artifacts.py b/monitoring/uss_qualifier/reports/artifacts.py
index 9072cfa1b1..6c1b4d3b04 100644
--- a/monitoring/uss_qualifier/reports/artifacts.py
+++ b/monitoring/uss_qualifier/reports/artifacts.py
@@ -7,6 +7,9 @@
from implicitdict import ImplicitDict
from monitoring.uss_qualifier.configurations.configuration import ArtifactsConfiguration
from monitoring.uss_qualifier.reports.documents import make_report_html
+from monitoring.uss_qualifier.reports.globally_expanded.generate import (
+ generate_globally_expanded_report,
+)
from monitoring.uss_qualifier.reports.report import TestRunReport, redact_access_tokens
from monitoring.uss_qualifier.reports.sequence_view.generate import (
generate_sequence_view,
@@ -87,3 +90,16 @@ def _should_redact(cfg) -> bool:
redacted_report if _should_redact(artifacts.sequence_view) else report
)
generate_sequence_view(report_to_write, artifacts.sequence_view, path)
+
+ if artifacts.globally_expanded_report:
+ # Globally-expanded report
+ path = os.path.join(output_path, "globally_expanded")
+ logger.info(f"Writing globally-expanded report to {path}")
+ report_to_write = (
+ redacted_report
+ if _should_redact(artifacts.globally_expanded_report)
+ else report
+ )
+ generate_globally_expanded_report(
+ report_to_write, artifacts.globally_expanded_report, path
+ )
diff --git a/monitoring/uss_qualifier/reports/globally_expanded/README.md b/monitoring/uss_qualifier/reports/globally_expanded/README.md
new file mode 100644
index 0000000000..67fa00cdce
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/globally_expanded/README.md
@@ -0,0 +1,3 @@
+# Globally-expanded report artifact
+
+The globally-expanded report artifact assembles procedural information about the test run into a single, flat presentation. It is intended to mimic what might be seen as output had the automated test been performed manually.
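+
+This artifact is generated when `globally_expanded_report` is specified in the artifacts configuration of a test run (see `ArtifactsConfiguration`); the output is written as a single `report.html` file in the `globally_expanded` folder of the artifacts output path. By default (`redact_access_tokens: true`), access token signatures in the underlying report are redacted before this artifact is generated.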
diff --git a/monitoring/uss_qualifier/reports/globally_expanded/__init__.py b/monitoring/uss_qualifier/reports/globally_expanded/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/monitoring/uss_qualifier/reports/globally_expanded/generate.py b/monitoring/uss_qualifier/reports/globally_expanded/generate.py
new file mode 100644
index 0000000000..d71c975754
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/globally_expanded/generate.py
@@ -0,0 +1,556 @@
+import os
+from dataclasses import dataclass
+from typing import Iterator, Sequence, List
+
+import marko
+import marko.element
+from marko.ext.gfm import gfm
+from marko.md_renderer import MarkdownRenderer
+
+from monitoring.monitorlib.versioning import get_code_version, repo_url_of
+from monitoring.uss_qualifier.common_data_definitions import Severity
+from monitoring.uss_qualifier.configurations.configuration import (
+ GloballyExpandedReportConfiguration,
+)
+from monitoring.uss_qualifier.documentation import text_of
+from monitoring.uss_qualifier.reports.report import TestRunReport
+from monitoring.uss_qualifier.reports.sequence_view.generate import (
+ compute_action_node,
+ make_resources_config,
+)
+from monitoring.uss_qualifier.reports.sequence_view.summary_types import (
+ Indexer,
+ ActionNode,
+ ActionNodeType,
+ TestedScenario,
+ EpochType,
+ TestedCase,
+ TestedStep,
+ EventType,
+)
+from monitoring.uss_qualifier.reports.tested_requirements.summaries import (
+ compute_test_run_information,
+)
+from monitoring.uss_qualifier.scenarios.documentation.parsing import (
+ get_documentation_by_name,
+)
+
+
+@dataclass
+class _Section:
+ title: str
+ body: str
+
+
+def generate_globally_expanded_report(
+ report: TestRunReport, config: GloballyExpandedReportConfiguration, output_path: str
+) -> None:
+ node = compute_action_node(report.report, Indexer())
+ test_run = compute_test_run_information(report)
+
+ sections = list(_generate_sections(node))
+
+ ger = (
+ (
+ (
+ f"""# Test run {test_run.test_run_id[0:7]}
+
+## Table of contents
+
+### Setup
+
+* Test run information
+* Resource pool
+
+### Test scenarios
+"""
+ )
+ + "\n".join(f"* {section.title}" for section in sections)
+ )
+ + "\n\n"
+ )
+
+ ger += f"""## Test run information
+
+| Test characteristic | Value |
+| --- | --- |
+| Test run identifier | TR-{ test_run.test_run_id[0:7] } |
+| Start time | { test_run.start_time if test_run.start_time else "" } |
+| End time | { test_run.end_time if test_run.end_time else "" } |
+| Test baseline identifier | TB-{ test_run.baseline[0:7] } |
+| Environment identifier | TE-{ test_run.environment[0:7] } |
+| Codebase version | { report.codebase_version } |
+| Commit hash | { report.commit_hash } |
+
+_This artifact was generated by { get_code_version() }_
+
+"""
+
+ resource_pool = make_resources_config(report.configuration.v1.test_run)
+
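+    # Renders a resource description (nested dicts, lists, and scalars) as a Markdown bullet list.
+    # For example (illustrative only), {"specification": {"base_url": "https://example.com"}} is rendered
+    # as "* specification" with an indented "* base_url: https://example.com" bullet beneath it.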
+ def indented_ul(value) -> List[str]:
+ result = []
+ if isinstance(value, dict):
+ for k, v in value.items():
+ vlines = indented_ul(v)
+ if len(vlines) == 1 and (not vlines[0] or vlines[0].strip()[0] != "*"):
+ vstr = vlines[0] if vlines[0] else '""'
+ result.append(f"* {k}: {vstr}")
+ else:
+ result.append(f"* {k}")
+ result.extend(f" {l}" for l in vlines)
+ elif isinstance(value, str):
+ result.append(value)
+ elif isinstance(value, list):
+ if len(value) == 0:
+ result.append("[]")
+ elif len(value) == 1:
+ vlines = indented_ul(value[0])
+ if len(vlines) == 1 and vlines[0] != "*":
+ result.append(f"[ {vlines[0]} ]")
+ else:
+ result.append("* Item 1:")
+ result.extend(f" {l}" for l in vlines)
+ else:
+ for i, v in enumerate(value):
+ vlines = indented_ul(v)
+ result.append(f"* Item {i + 1}:")
+ result.extend(f" {l}" for l in vlines)
+ else:
+ result.append(str(value))
+ return result
+
+ def describe_pool_resource(k: str, v: dict) -> str:
+ return f"#### {k}\n\n" + "\n".join(indented_ul(v))
+
+ ger += "## Resource pool"
+
+ if "Environment" in resource_pool and resource_pool["Environment"]:
+ ger += f"\n\n### Environment\n\n" + "\n".join(
+ describe_pool_resource(k, v)
+ for k, v in resource_pool["Environment"].items()
+ )
+
+ if "Baseline" in resource_pool and resource_pool["Baseline"]:
+ ger += f"\n\n### Baseline\n\n" + "\n".join(
+ describe_pool_resource(k, v) for k, v in resource_pool["Baseline"].items()
+ )
+
+ ger += "\n\n"
+
+ ger += "\n\n".join(f"## {section.title}\n\n{section.body}" for section in sections)
+
+ os.makedirs(output_path, exist_ok=True)
+ with open(os.path.join(output_path, "report.html"), "w") as f:
+ f.write("\n")
+ with open(
+ os.path.join(
+ os.path.dirname(__file__), "../templates/globally_expanded/style.html"
+ ),
+ "r",
+ ) as style:
+ f.write(style.read())
+ f.write("\n")
+ f.write(gfm.render(gfm.parse(ger)))
+ f.write("\n")
+ f.write("\n")
+
+
+def _generate_sections(node: ActionNode) -> Iterator[_Section]:
+ if node.node_type == ActionNodeType.Scenario:
+ yield _generate_scenario_section(node.scenario)
+ elif node.node_type == ActionNodeType.SkippedAction:
+ yield _generate_skipped_scenario_section(node)
+ else:
+ for child in node.children:
+ for subsection in _generate_sections(child):
+ yield subsection
+
+
+def _generate_skipped_scenario_section(node: ActionNode) -> _Section:
+ return _Section(
+ title=f"[skipped] {node.name}",
+ body=f"This instance of this test scenario was skipped in this test run because: {node.skipped_action.reason}",
+ )
+
+
+def _generate_scenario_section(scenario: TestedScenario) -> _Section:
+ doc_summary = get_documentation_by_name(scenario.type)
+ with open(doc_summary.local_path, "r") as f:
+ doc = marko.parse(f.read())
+
+ _modify_scenario_documentation(
+ doc, os.path.abspath(doc_summary.local_path), scenario
+ )
+
+ renderer = MarkdownRenderer()
+ return _Section(
+ title=f"[{scenario.scenario_index}] {scenario.name}",
+ body=renderer.render_children(doc),
+ )
+
+
+def _modify_scenario_documentation(
+ doc: marko.block.Document, origin_filename: str, scenario: TestedScenario
+) -> None:
+ _remove_top_heading(doc)
+ _indent_headings(doc.children, 1)
+ _inflate_fragments(doc, origin_filename)
+ _add_resource_origins(doc, scenario)
+ _add_context_to_scenario(doc, scenario)
+ _update_links(doc, origin_filename)
+
+
+def _remove_top_heading(doc: marko.block.Document) -> None:
+ for c in range(len(doc.children)):
+ if isinstance(doc.children[c], marko.block.Heading):
+ doc.children = doc.children[c + 1 :]
+ return
+
+
+def _indent_headings(elements: Sequence[marko.element.Element], levels: int) -> None:
+ for element in elements:
+ if isinstance(element, marko.block.Heading):
+ element.level = min(element.level + levels, 6)
+ if hasattr(element, "children") and element.children:
+ _indent_headings(element.children, levels)
+
+
+def _inflate_fragments(parent: marko.element.Element, origin_filename: str) -> None:
+ if hasattr(parent, "children") and parent.children:
+ c = 0
+ while c < len(parent.children):
+ child = parent.children[c]
+ if (
+ isinstance(child, marko.block.Heading)
+ and hasattr(child, "children")
+ and len(child.children) > 0
+ and isinstance(child.children[0], marko.block.inline.Link)
+ ):
+ # This is a heading with a link in it, so we infer this is a linked test step fragment
+ doc_filename = child.children[0].dest
+ absolute_path = os.path.abspath(
+ os.path.join(os.path.dirname(origin_filename), doc_filename)
+ )
+ doc = _get_test_step_fragment(absolute_path, child.level)
+ _update_links(doc, absolute_path)
+ _strip_link(child)
+ parent.children = (
+ parent.children[0 : c + 1] + doc.children + parent.children[c + 1 :]
+ )
+ c += len(doc.children)
+ elif isinstance(child, marko.element.Element):
+ _inflate_fragments(parent.children[c], origin_filename)
+ c += 1
+ else:
+ c += 1
+
+
+def _add_resource_origins(doc: marko.block.Document, scenario: TestedScenario) -> None:
+ resource_heading_level = None
+ current_resource = None
+ current_resource_i0 = None
+ c = 0
+
+ def add_resource_origin():
+ nonlocal c, current_resource, current_resource_i0
+ if current_resource is None:
+ return
+ if current_resource not in scenario.resource_origins:
+ # This resource wasn't provided for this scenario instance; remove resource definition
+ doc.children = doc.children[0:current_resource_i0] + doc.children[c:]
+ c = current_resource_i0
+ return
+ # Insert resource origin information
+ origin = marko.parse(
+ f"\n\nProvided by {scenario.resource_origins[current_resource]}.\n"
+ )
+ doc.children = doc.children[0:c] + origin.children + doc.children[c:]
+ c += len(origin.children)
+
+ while c < len(doc.children):
+ child = doc.children[c]
+ if isinstance(child, marko.block.Heading) and text_of(child) == "Resources":
+ # We just found the resources section
+ resource_heading_level = child.level
+ c += 1
+ elif resource_heading_level is None:
+ # We're not yet in the resources section
+ c += 1
+ elif (
+ resource_heading_level is not None
+ and isinstance(child, marko.block.Heading)
+ and child.level <= resource_heading_level
+ ):
+ # This is the heading ending the resources section, so we're done
+ break
+ elif (
+ isinstance(child, marko.block.Heading)
+ and child.level == resource_heading_level + 1
+ ):
+ # This is the heading defining a resource
+ add_resource_origin()
+ current_resource = text_of(child)
+ current_resource_i0 = c
+ c += 1
+ else:
+ c += 1
+ add_resource_origin()
+
+
+def _strip_link(element: marko.element.Element) -> None:
+ if hasattr(element, "children") and element.children:
+ for c in range(len(element.children)):
+ child = element.children[c]
+ if isinstance(child, marko.block.inline.Link):
+ element.children[c] = child.children[0]
+ elif isinstance(child, marko.element.Element):
+ _strip_link(child)
+
+
+def _get_test_step_fragment(
+ absolute_path: str, parent_level: int
+) -> marko.block.Document:
+ with open(absolute_path, "r") as f:
+ doc = marko.parse(f.read())
+
+ _remove_top_heading(doc)
+ _indent_headings(doc.children, parent_level - 1)
+ _inflate_fragments(doc, absolute_path)
+
+ return doc
+
+
+def _add_context_to_scenario(
+ doc: marko.block.Document, scenario: TestedScenario
+) -> None:
+ test_case = None
+ test_case_i0 = None
+ test_case_level = None
+ cleanup = False
+ c = 0
+
+ def add_context_to_case():
+ nonlocal c, cleanup, test_case, test_case_i0, test_case_level
+ if test_case_i0 is not None:
+ if test_case is not None:
+ if cleanup:
+ c += _add_context_to_step(doc, test_case.steps[0], test_case_i0, c)
+ else:
+ c += _add_context_to_case(doc, test_case, test_case_i0, c)
+ else:
+ # This test case wasn't executed this test run; remove it
+ doc.children = doc.children[0:test_case_i0] + doc.children[c:]
+ c = test_case_i0
+ test_case = None
+ test_case_i0 = None
+ test_case_level = None
+ cleanup = False
+
+ while c < len(doc.children):
+ child = doc.children[c]
+ if isinstance(child, marko.block.Heading) and text_of(child).endswith(
+ " test case"
+ ):
+ # This is the beginning of a test case
+ add_context_to_case()
+
+ case_name = text_of(child)[: -len(" test case")]
+ test_case_i0 = c
+ test_case_level = child.level
+ for epoch in scenario.epochs:
+ if epoch.type != EpochType.Case:
+ continue
+ if case_name == epoch.case.name:
+ test_case = epoch.case
+ break
+ elif isinstance(child, marko.block.Heading) and text_of(child) == "Cleanup":
+ # This is the beginning of the cleanup section
+ add_context_to_case()
+
+ test_case_i0 = c
+ test_case_level = child.level
+ cleanup = True
+ for epoch in scenario.epochs:
+ if epoch.type != EpochType.Case:
+ continue
+ if len(epoch.case.steps) == 1 and epoch.case.steps[0].name == "Cleanup":
+ test_case = epoch.case
+ break
+ elif (
+ test_case_level is not None
+ and isinstance(child, marko.block.Heading)
+ and child.level <= test_case_level
+ ):
+ # This heading ends the current test case section
+ add_context_to_case()
+ c += 1
+ add_context_to_case()
+
+
+def _add_context_to_case(
+ doc: marko.block.Document, case: TestedCase, i0: int, i1: int
+) -> int:
+ test_step = None
+ test_step_i0 = None
+ test_step_level = None
+ c = i0
+ added = 0
+
+ def add_context_to_step():
+ nonlocal c, i1, added, test_step, test_step_i0, test_step_level
+ if test_step_i0 is not None:
+ if test_step is not None:
+ dc = _add_context_to_step(doc, test_step, test_step_i0, c)
+ else:
+ # This test step wasn't executed this test run; remove it
+ dc = test_step_i0 - c
+ doc.children = doc.children[0:test_step_i0] + doc.children[c:]
+ c += dc
+ i1 += dc
+ added += dc
+ test_step = None
+ test_step_i0 = None
+ test_step_level = None
+
+ while c < i1:
+ child = doc.children[c]
+ if isinstance(child, marko.block.Heading) and text_of(child).endswith(
+ " test step"
+ ):
+ # This is the beginning of a test step
+ add_context_to_step()
+
+ step_name = text_of(child)[: -len(" test step")]
+ test_step_i0 = c
+ test_step_level = child.level
+ for step in case.steps:
+ if step_name == step.name:
+ test_step = step
+ break
+ elif (
+ test_step_level is not None
+ and isinstance(child, marko.block.Heading)
+ and child.level <= test_step_level
+ ):
+            # This heading ends the current test step section
+ add_context_to_step()
+ c += 1
+ add_context_to_step()
+ return added
+
+
+def _add_context_to_step(
+ doc: marko.block.Document, step: TestedStep, i0: int, i1: int
+) -> int:
+ test_check_name = None
+ test_check_i0 = None
+ test_check_level = None
+ c = i0
+ added = 0
+
+ def add_context_to_check():
+ nonlocal c, i1, added, test_check_name, test_check_i0, test_check_level
+ # TODO: Remove checks which were not actually performed in this test run
+ if test_check_name is not None:
+ dc = _add_context_to_check(doc, step, test_check_name, test_check_i0, c)
+ c += dc
+ i1 += dc
+ added += dc
+ test_check_name = None
+ test_check_i0 = None
+ test_check_level = None
+
+ while c < i1:
+ child = doc.children[c]
+ if isinstance(child, marko.block.Heading) and text_of(child).endswith(" check"):
+ # This is the beginning of a test check
+ add_context_to_check()
+
+ test_check_name = text_of(child)[: -len(" check")]
+ for sev in Severity.all_values():
+ prefix = sev.symbol + " "
+ if test_check_name.startswith(prefix):
+ test_check_name = test_check_name[len(prefix) :]
+ test_check_i0 = c
+ test_check_level = child.level
+ elif (
+ test_check_level is not None
+ and isinstance(child, marko.block.Heading)
+ and child.level <= test_check_level
+ ):
+ # This heading ends the current test check section
+ add_context_to_check()
+ c += 1
+ add_context_to_check()
+ return added
+
+
+def _add_context_to_check(
+ doc: marko.block.Document, step: TestedStep, test_check_name: str, i0: int, i1: int
+) -> int:
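+    # For each event matching this check, a line like "✅ uss1 (2023-01-01T00:00:00Z)" (illustrative
+    # participant and timestamp) is appended after the check's documentation.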
+ check_text = [""]
+ for event in step.events:
+ if (
+ event.type == EventType.PassedCheck
+ and event.passed_check.name == test_check_name
+ ):
+ check_text.append(
+ f"✅ {', '.join(event.passed_check.participants)} ({event.passed_check.timestamp})"
+ )
+ elif (
+ event.type == EventType.FailedCheck
+ and event.failed_check.name == test_check_name
+ ):
+ check_text.append(
+ f"❌ {', '.join(event.failed_check.participants)} ({event.failed_check.timestamp})"
+ )
+ additions = marko.parse("\n\n".join(check_text))
+ doc.children = doc.children[0:i1] + additions.children + doc.children[i1:]
+ return len(additions.children)
+
+
+def _update_links(element: marko.element.Element, origin_filename: str) -> None:
+ if isinstance(element, marko.block.inline.Link) or isinstance(
+ element, marko.block.inline.Image
+ ):
+ if not element.dest.startswith("http://") and not element.dest.startswith(
+ "https://"
+ ):
+ absolute_path = os.path.realpath(
+ os.path.join(os.path.dirname(origin_filename), element.dest)
+ )
+ url = repo_url_of(absolute_path)
+ if isinstance(element, marko.block.inline.Image):
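+                # e.g. (illustrative): https://github.com/interuss/monitoring/blob/<commit>/assets/x.png
+                # becomes https://raw.githubusercontent.com/interuss/monitoring/<commit>/assets/x.png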
+ url = url.replace("/github.com/", "/raw.githubusercontent.com/")
+ url = url.replace("/blob/", "/")
+ element.dest = url
+ if hasattr(element, "children") and element.children:
+ for child in element.children:
+ if isinstance(child, marko.element.Element):
+ _update_links(child, origin_filename)
diff --git a/monitoring/uss_qualifier/reports/sequence_view/generate.py b/monitoring/uss_qualifier/reports/sequence_view/generate.py
index 12503ee7ba..db859d1a98 100644
--- a/monitoring/uss_qualifier/reports/sequence_view/generate.py
+++ b/monitoring/uss_qualifier/reports/sequence_view/generate.py
@@ -109,7 +109,15 @@ def _skipped_action_of(report: SkippedActionReport) -> ActionNode:
return parent
-def _compute_action_node(report: TestSuiteActionReport, indexer: Indexer) -> ActionNode:
+def compute_action_node(report: TestSuiteActionReport, indexer: Indexer) -> ActionNode:
+ """Summarize the information in the provided report as an ActionNode.
+
+ Args:
+ report: Test report containing information to summarize.
+ indexer: Tracker for labeling executed test scenarios as they are discovered.
+
+ Returns: Report information summarized to support a sequence view artifact.
+ """
(
is_test_suite,
is_test_scenario,
@@ -123,7 +131,7 @@ def _compute_action_node(report: TestSuiteActionReport, indexer: Indexer) -> Act
scenario=compute_tested_scenario(report.test_scenario, indexer),
)
elif is_test_suite:
- children = [_compute_action_node(a, indexer) for a in report.test_suite.actions]
+ children = [compute_action_node(a, indexer) for a in report.test_suite.actions]
return ActionNode(
name=report.test_suite.name,
node_type=ActionNodeType.Suite,
@@ -137,8 +145,7 @@ def _compute_action_node(report: TestSuiteActionReport, indexer: Indexer) -> Act
name=generator_type.get_name(),
node_type=ActionNodeType.ActionGenerator,
children=[
- _compute_action_node(a, indexer)
- for a in report.action_generator.actions
+ compute_action_node(a, indexer) for a in report.action_generator.actions
],
)
else:
@@ -257,7 +264,18 @@ def _generate_scenario_pages(
_generate_scenario_pages(child, config, output_path)
-def _make_resources_config(config: TestConfiguration) -> dict:
+def make_resources_config(config: TestConfiguration) -> dict:
+ """Describe the resources in a TestConfiguration, broken down between Baseline and Environment.
+
+ Args:
+ config: TestConfiguration with resources to describe
+
+    Returns: Multi-level dict with levels:
+        * Baseline / Environment
+            * <resource name>
+                * Specification -> <resource specification>
+                * Dependencies -> <resource dependencies>
+ """
baseline = {}
environment = {}
non_baseline_inputs = (
@@ -289,9 +307,9 @@ def _make_resources_config(config: TestConfiguration) -> dict:
def generate_sequence_view(
report: TestRunReport, config: SequenceViewConfiguration, output_path: str
) -> None:
- node = _compute_action_node(report.report, Indexer())
+ node = compute_action_node(report.report, Indexer())
- resources_config = _make_resources_config(report.configuration.v1.test_run)
+ resources_config = make_resources_config(report.configuration.v1.test_run)
os.makedirs(output_path, exist_ok=True)
_generate_scenario_pages(node, config, output_path)
diff --git a/monitoring/uss_qualifier/reports/templates/globally_expanded/style.html b/monitoring/uss_qualifier/reports/templates/globally_expanded/style.html
new file mode 100644
index 0000000000..77772f42cd
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/templates/globally_expanded/style.html
@@ -0,0 +1,101 @@
+
diff --git a/requirements.in b/requirements.in
index 654ad4294f..0c81c5f078 100644
--- a/requirements.in
+++ b/requirements.in
@@ -27,7 +27,7 @@ kubernetes==23.3.0 # deployment_manager
locust==1.3.2 # loadtest
loguru==0.6.0
lxml==5.0.2
-marko==1.2.2 # uss_qualifier
+marko==2.1.2 # uss_qualifier
numpy==1.26.4
oauthlib==3.2.2 # mock_uss tracer
pem==21.2.0 # deployment_manager
diff --git a/requirements.txt b/requirements.txt
index ca09ad62f6..839ae34b31 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -848,9 +848,9 @@ lxml==5.0.2 \
# via
# -r requirements.in
# pykml
-marko==1.2.2 \
- --hash=sha256:d467c3ad450be693505cb8eb7054718efe3f14fccc2310588f9bc9c1d992ef94 \
- --hash=sha256:e2d4a299bfc5ef601ba0f0827c1927cee26e451581853c5333f2db098e609c0d
+marko==2.1.2 \
+ --hash=sha256:a9170006b879376e6845c91b1ae3dce2992772954b99b70175ff888537186011 \
+ --hash=sha256:c14aa7a77468aaaf53cf056dcd3d32398b9df4c3fb81f5e120dd37cbb9f8c859
# via -r requirements.in
markupsafe==2.1.5 \
--hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json
index f2b7177612..ba2f067fc9 100644
--- a/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json
@@ -7,6 +7,17 @@
"description": "Path to content that replaces the $ref",
"type": "string"
},
+ "globally_expanded_report": {
+ "description": "If specified, configuration describing a desired report mimicking what might be seen had the test run been conducted manually.",
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "$ref": "GloballyExpandedReportConfiguration.json"
+ }
+ ]
+ },
"raw_report": {
"description": "Configuration for raw report generation",
"oneOf": [
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/GloballyExpandedReportConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/GloballyExpandedReportConfiguration.json
new file mode 100644
index 0000000000..57329a96a6
--- /dev/null
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/GloballyExpandedReportConfiguration.json
@@ -0,0 +1,16 @@
+{
+ "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/GloballyExpandedReportConfiguration.json",
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "description": "monitoring.uss_qualifier.configurations.configuration.GloballyExpandedReportConfiguration, as defined in monitoring/uss_qualifier/configurations/configuration.py",
+ "properties": {
+ "$ref": {
+ "description": "Path to content that replaces the $ref",
+ "type": "string"
+ },
+ "redact_access_tokens": {
+ "description": "When True, look for instances of \"Authorization\" keys in the report with values starting \"Bearer \" and redact the signature from those access tokens",
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+}
\ No newline at end of file