diff --git a/.bazelrc b/.bazelrc
index bd6c994904..2be5855993 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -17,7 +17,6 @@ common --registry=https://bcr.bazel.build
 
 # Flags needed by score_baselibs and communication modules.
 # Do not add more!
-build --@score_baselibs//score/mw/log/detail/flags:KUse_Stub_Implementation_Only=False
 build --@score_baselibs//score/mw/log/flags:KRemote_Logging=False
 build --@score_baselibs//score/json:base_library=nlohmann
 build --@score_communication//score/mw/com/flags:tracing_library=stub
diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml
new file mode 100644
index 0000000000..2c41924f66
--- /dev/null
+++ b/.github/codeql/codeql-config.yml
@@ -0,0 +1,7 @@
+name: "Custom CodeQL Configuration for MISRA"
+
+paths-ignore:
+  - "**/*test*"
+  - "**/*mock*"
+  - "**/test/**"
+  - "**/mock/**"
\ No newline at end of file
diff --git a/.github/codeql/coding-standards.yml b/.github/codeql/coding-standards.yml
new file mode 100644
index 0000000000..8675280ae0
--- /dev/null
+++ b/.github/codeql/coding-standards.yml
@@ -0,0 +1,3 @@
+deviations: []
+guideline-recategorizations: []
+deviation-permits: []
diff --git a/.github/workflows/codeql-multiple-repo-scan.yml b/.github/workflows/codeql-multiple-repo-scan.yml
new file mode 100644
index 0000000000..aaca77bfa2
--- /dev/null
+++ b/.github/workflows/codeql-multiple-repo-scan.yml
@@ -0,0 +1,206 @@
+# *******************************************************************************
+# Copyright (c) 2025 Contributors to the Eclipse Foundation
+#
+# See the NOTICE file(s) distributed with this work for additional
+# information regarding copyright ownership.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Apache License Version 2.0 which is available at
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# SPDX-License-Identifier: Apache-2.0
+# *******************************************************************************
+
+name: "CodeQL - Multi-Repo Source Scan"
+
+on:
+  pull_request:
+    types: [opened, reopened, synchronize]
+  merge_group:
+    types: [checks_requested]
+
+permissions:
+  contents: write
+
+jobs:
+  analyze-repos:
+    name: Analyze Multiple Repositories
+    runs-on: ubuntu-latest
+    permissions:
+      security-events: write
+      packages: read
+      actions: read
+      contents: read
+
+    steps:
+      - name: Checkout central repository
+        uses: actions/checkout@v4
+
+      - name: Checkout CodeQL Coding Standards scripts
+        uses: actions/checkout@v4
+        with:
+          repository: github/codeql-coding-standards
+          path: codeql-coding-standards-repo # Clone into this folder
+          ref: main # Or a specific release version, e.g. 'v2.53.0-dev'
+
+      # Add coding standard packages and dependencies
+      - name: Install Python dependencies for Coding Standards scripts
+        run: |
+          python3 -m pip install --upgrade pip
+          pip3 install pyyaml jsonpath-ng jsonschema jsonpatch jsonpointer pytest sarif-tools
+
+      - name: Parse known_good.json and create repos.json
+        id: parse-repos
+        run: |
+          sudo apt-get update && sudo apt-get install -y jq
+          JSON_FILE="./known_good.json"
+
+          # Check if the file exists
+          if [ ! -f "$JSON_FILE" ]; then
+            echo "Error: file not found '$JSON_FILE'"
+            ls -la .
+            exit 1
+          fi
+
+          # Create repos.json from known_good.json
+          # This jq command transforms the 'modules' object into an array of repository objects
+          # with 'name', 'url', 'version' (branch/tag/hash), and 'path'.
+          jq '[.modules | to_entries[] | {
+            name: .key,
+            url: .value.repo,
+            version: (.value.branch // .value.hash // .value.version),
+            path: ("repos/" + .key)
+          }]' "$JSON_FILE" > repos.json
+
+          echo "Generated repos.json:"
+          cat repos.json
+          echo "" # Add a newline for better readability
+
+          # The following GITHUB_OUTPUT variables are set for each module.
+          # These might be useful for other steps, but are not directly used by the 'checkout-repos' step
+          # which now reads 'repos.json' directly.
+          echo "MODULE_COUNT=$(jq '.modules | length' "$JSON_FILE")" >> $GITHUB_OUTPUT
+
+          jq -c '.modules | to_entries[]' "$JSON_FILE" | while read -r module_entry; do
+            module_name=$(echo "$module_entry" | jq -r '.key')
+            repo_url=$(echo "$module_entry" | jq -r '.value.repo // empty')
+            version=$(echo "$module_entry" | jq -r '.value.version // empty')
+            branch=$(echo "$module_entry" | jq -r '.value.branch // empty')
+            hash=$(echo "$module_entry" | jq -r '.value.hash // empty')
+
+            echo "${module_name}_url=$repo_url" >> $GITHUB_OUTPUT
+
+            if [ -n "$version" ]; then
+              echo "${module_name}_version=$version" >> $GITHUB_OUTPUT
+            fi
+
+            if [ -n "$branch" ]; then
+              echo "${module_name}_branch=$branch" >> $GITHUB_OUTPUT
+            fi
+
+            if [ -n "$hash" ]; then
+              echo "${module_name}_hash=$hash" >> $GITHUB_OUTPUT
+            fi
+          done
+
+      - name: Checkout all pinned repositories
+        id: checkout-repos
+        run: |
+          # jq is already installed by the previous step.
+
+          # Read repositories from the repos.json file created by the previous step
+          repos=$(cat repos.json)
+          repo_count=$(echo "$repos" | jq length)
+
+          # Initialize an empty string for paths to be outputted
+          repo_paths_output=""
+
+          for i in $(seq 0 $((repo_count-1))); do
+            name=$(echo "$repos" | jq -r ".[$i].name")
+            url=$(echo "$repos" | jq -r ".[$i].url")
+            ref=$(echo "$repos" | jq -r ".[$i].version") # This can be a branch, tag, or commit hash
+            path=$(echo "$repos" | jq -r ".[$i].path") # e.g., "repos/score_baselibs"
+
+            echo "Checking out $name ($ref) to $path"
+
+            # Create the parent directory if it doesn't exist
+            mkdir -p "$(dirname "$path")"
+
+            # Check if 'ref' looks like a commit hash (e.g., 40 hex characters)
+            # This is a heuristic; a more robust check might involve fetching refs first.
+            if [[ "$ref" =~ ^[0-9a-fA-F]{40}$ ]]; then
+              echo "  Detected commit hash. Cloning and then checking out."
+              git clone "$url" "$path"
+              (cd "$path" && git checkout "$ref")
+            else
+              echo "  Detected branch/tag. Cloning with --branch."
+              git clone --depth 1 --branch "$ref" "$url" "$path"
+            fi
+
+            # Append the path to the list, separated by commas
+            if [ -z "$repo_paths_output" ]; then
+              repo_paths_output="$path"
+            else
+              repo_paths_output="$repo_paths_output,$path"
+            fi
+          done
+
+          # Output all paths as a single variable
+          echo "repo_paths=$repo_paths_output" >> $GITHUB_OUTPUT
+
+      - name: Initialize CodeQL for all repositories
+        uses: github/codeql-action/init@v4
+        with:
+          languages: cpp
+          build-mode: none
+          packs: codeql/misra-cpp-coding-standards
+          config-file: ./.github/codeql/codeql-config.yml
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v4
+        with:
+          upload-database: false # Don't upload databases for each repo
+          output: sarif-results/
+          category: "multi-repo-scan"
+
+      - name: Recategorize Guidelines
+        if: always()
+        run: |
+          RECATEGORIZE_SCRIPT="codeql-coding-standards-repo/scripts/guideline_recategorization/recategorize.py"
+          CODING_STANDARDS_CONFIG="./.github/codeql/coding-standards.yml"
+
+          CODING_STANDARDS_SCHEMA="codeql-coding-standards-repo/schemas/coding-standards-schema-1.0.0.json"
+          SARIF_SCHEMA="codeql-coding-standards-repo/schemas/sarif-schema-2.1.0.json"
+
+
+          SARIF_FILE="sarif-results/cpp.sarif"
+
+          mkdir -p sarif-results-recategorized
+          echo "Processing $SARIF_FILE for recategorization..."
+          python3 "$RECATEGORIZE_SCRIPT" \
+            --coding-standards-schema-file "$CODING_STANDARDS_SCHEMA" \
+            --sarif-schema-file "$SARIF_SCHEMA" \
+            "$CODING_STANDARDS_CONFIG" \
+            "$SARIF_FILE" \
+            "sarif-results-recategorized/$(basename "$SARIF_FILE")"
+
+          rm "$SARIF_FILE"
+          mv "sarif-results-recategorized/$(basename "$SARIF_FILE")" "$SARIF_FILE"
+
+      - name: Generate HTML Report from SARIF
+        run: |
+          SARIF_FILE="sarif-results/cpp.sarif"
+          sarif html "$SARIF_FILE" --output codeql-report.html
+
+      - name: Upload SARIF results as artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: codeql-sarif-results
+          path: sarif-results/
+
+
+      - name: Upload HTML Report as artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: codeql-html-report
+          path: codeql-report.html
\ No newline at end of file
diff --git a/MODULE.bazel b/MODULE.bazel
index 947db62d1e..3db57550c8 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -13,7 +13,7 @@
 
 module(
     name = "score_reference_integration",
-    version = "0.5.0-alpha.1",
+    version = "0.5.0-beta",
     compatibility_level = 0,
 )
 
diff --git a/qnx_qemu/BUILD b/qnx_qemu/BUILD
deleted file mode 100644
index 0ee32b983b..0000000000
--- a/qnx_qemu/BUILD
+++ /dev/null
@@ -1,135 +0,0 @@
-# *******************************************************************************
-# Copyright (c) 2025 Contributors to the Eclipse Foundation
-#
-# See the NOTICE file(s) distributed with this work for additional
-# information regarding copyright ownership.
-#
-# This program and the accompanying materials are made available under the
-# terms of the Apache License Version 2.0 which is available at
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# SPDX-License-Identifier: Apache-2.0
-# *******************************************************************************
-load("@rules_shell//shell:sh_binary.bzl", "sh_binary")
-load("@score_itf//:defs.bzl", "py_itf_test")
-
-
-sh_binary(
-    name = "run_qemu",
-    srcs = ["scripts/run_qemu.sh"],
-    args = [
-        "$(location @toolchains_qnx_sdp//:host_dir)",
-        "$(location //build:init)",
-    ],
-    data = [
-        "//build:init",
-        "@toolchains_qnx_sdp//:host_all",
-        "@toolchains_qnx_sdp//:host_dir",
-    ],
-)
-
-sh_binary(
-    name = "run_qemu_portforward",
-    srcs = ["scripts/run_qemu_portforward.sh"],
-    args = [
-        "$(location @toolchains_qnx_sdp//:host_dir)",
-        "$(location //build:init)",
-    ],
-    data = [
-        "//build:init",
-        "@toolchains_qnx_sdp//:host_all",
-        "@toolchains_qnx_sdp//:host_dir",
-    ],
-)
-
-
-sh_binary(
-    name = "test_qemu_bridge",
-    srcs = ["test/test_qnx_qemu_bridge.sh"],
-    args = [
-        "$(location @toolchains_qnx_sdp//:host_dir)",
-        "$(location //build:init)",
-        "--timeout=90",
-        "--ssh-port=2222",
-        "--boot-wait=15",
-    ],
-    data = [
-        "//build:init",
-        "@toolchains_qnx_sdp//:host_all",
-        "@toolchains_qnx_sdp//:host_dir",
-    ],
-)
-
-sh_binary(
-    name = "test_qemu_portforward",
-    srcs = ["test/test_qnx_qemu_portforward.sh"],
-    args = [
-        "$(location @toolchains_qnx_sdp//:host_dir)",
-        "$(location //build:init)",
-        "--timeout=90",
-        "--ssh-port=2222",
-        "--boot-wait=15",
-    ],
-    data = [
-        "//build:init",
-        "@toolchains_qnx_sdp//:host_all",
-        "@toolchains_qnx_sdp//:host_dir",
-    ],
-)
-
-py_itf_test(
-    name = "test_ssh_qemu",
-    srcs = [
-        "test/itf/test_ssh.py",
-    ],
-    args = [
-        "--target_config=$(location target_config.json)",
-        "--ecu=s_core_ecu_qemu",
-        "--qemu_image=$(location //build:init)",
-    ],
-    plugins = [
-        "itf.plugins.base.base_plugin",
-    ],
-    data = [
-        "//build:init",
-        "target_config.json",
-    ],
-)
-
-py_itf_test(
-    name = "test_scrample_qemu",
-    srcs = [
-        "test/itf/test_scrample.py",
-    ],
-    args = [
-        "--target_config=$(location target_config.json)",
-        "--ecu=s_core_ecu_qemu",
-        "--qemu_image=$(location //build:init)",
-    ],
-    plugins = [
-        "itf.plugins.base.base_plugin",
-    ],
-    data = [
-        "//build:init",
-        "target_config.json",
-    ],
-)
-
-py_itf_test(
-    name = "test_persistency_qemu",
-    srcs = [
-        "test/itf/test_persistency.py",
-    ],
-    args = [
-        "--target_config=$(location target_config.json)",
-        "--ecu=s_core_ecu_qemu",
-        "--qemu_image=$(location //build:init)",
-    ],
-    plugins = [
-        "itf.plugins.base.base_plugin",
-    ],
-    data = [
-        "//build:init",
-        "target_config.json",
-    ],
-)
diff --git a/score_toolchains.MODULE.bazel b/score_toolchains.MODULE.bazel
index 7a0dacf371..6a0207f842 100644
--- a/score_toolchains.MODULE.bazel
+++ b/score_toolchains.MODULE.bazel
@@ -12,7 +12,7 @@
 # *******************************************************************************
 
 # QNX toolchain
-bazel_dep(name = "score_toolchains_qnx", version = "0.0.2")
+bazel_dep(name = "score_toolchains_qnx", version = "0.0.6")
 toolchains_qnx = use_extension("@score_toolchains_qnx//:extensions.bzl", "toolchains_qnx")
 toolchains_qnx.sdp(
     sha256 = "f2e0cb21c6baddbcb65f6a70610ce498e7685de8ea2e0f1648f01b327f6bac63",
diff --git a/scripts/integration_test.sh b/scripts/integration_test.sh
index dc35bef5e1..45b344a5ea 100755
--- a/scripts/integration_test.sh
+++ b/scripts/integration_test.sh
@@ -111,7 +111,7 @@ any_failed=0
 for group in "${!BUILD_TARGET_GROUPS[@]}"; do
     targets="${BUILD_TARGET_GROUPS[$group]}"
     log_file="${LOG_DIR}/${group}.log"
-    
+
     # Log build group banner only to stdout/stderr (not into summary table file)
     echo "--- Building group: ${group} ---"
     start_ts=$(date +%s)
diff --git a/scripts/run_unit_tests.sh b/scripts/run_unit_tests.sh
index 00af935e3d..6ff54a711d 100755
--- a/scripts/run_unit_tests.sh
+++ b/scripts/run_unit_tests.sh
@@ -13,7 +13,7 @@ declare -A UT_TARGET_GROUPS=(
     -@score_baselibs//score/language/safecpp/aborts_upon_exception:abortsuponexception_toolchain_test \
     -@score_baselibs//score/containers:dynamic_array_test \
     -@score_baselibs//score/mw/log/configuration:* \
-    -@score_baselibs//score/json/examples:*" 
+    -@score_baselibs//score/json/examples:*"
     [communication]="@score_communication//score/mw/com/impl/... -- \
     -@score_communication//score/mw/com/impl:unit_test_runtime_single_exec \
     -@score_communication//score/mw/com/impl/configuration:config_parser_test \
@@ -28,6 +28,9 @@ declare -A UT_TARGET_GROUPS=(
 
 # Markdown table header
 echo -e "Status\tPassed\tFailed\tSkipped\tTotal\tGroup\tDuration(s)" >> "${SUMMARY_FILE}"
 
+# Track if any test failed
+any_failed=0
+
 for group in "${!UT_TARGET_GROUPS[@]}"; do
     targets="${UT_TARGET_GROUPS[$group]}"
     command="bazel test --config="${CONFIG}" ${targets}"
@@ -36,11 +39,11 @@ for group in "${!UT_TARGET_GROUPS[@]}"; do
     echo "${command}"
     echo "==========================================="
     start_ts=$(date +%s)
-    out=$(bazel test --test_summary=testcase --test_output=errors --nocache_test_results --config="${CONFIG}" ${targets} 2>&1 | tee "${LOG_DIR}/ut_${group}_output.log") 
+    out=$(bazel test --test_summary=testcase --test_output=errors --nocache_test_results --config="${CONFIG}" ${targets} 2>&1 | tee "${LOG_DIR}/ut_${group}_output.log")
     build_status=${PIPESTATUS[0]}
     end_ts=$(date +%s)
     duration=$(( end_ts - start_ts ))
-    
+
     # Parse bazel output
     tests_passed=$(echo "$out" | grep -Eo '[0-9]+ passing' | grep -Eo '[0-9]+' | head -n1)
     tests_failed=$(echo "$out" | grep -Eo '[0-9]+ failing' | grep -Eo '[0-9]+' | head -n1)
@@ -50,13 +53,20 @@
         status_symbol="✅"
     else
         status_symbol="❌"
+        any_failed=1
     fi
-    
+
     # Append as a markdown table row
-    echo -e "${status_symbol}\t${tests_passed}\t${tests_failed}\t${tests_skipped}\t${tests_executed}\t${group}\t${duration}s" >> "${SUMMARY_FILE}"
+    echo -e "${status_symbol}\t${tests_passed}\t${tests_failed}\t${tests_skipped}\t${tests_executed}\t${group}\t${duration}s" | tee -a "${SUMMARY_FILE}"
     echo "==========================================="
     echo -e "\n\n"
 done
 
 # Align the summary table columns
 column -t -s $'\t' "${SUMMARY_FILE}" > "${SUMMARY_FILE}.tmp" && mv "${SUMMARY_FILE}.tmp" "${SUMMARY_FILE}"
+
+# Final check: exit with non-zero if any test failed
+if [[ $any_failed -ne 0 ]]; then
+    echo "Some unit test groups failed. Exiting with non-zero status."
+    exit 1
+fi
\ No newline at end of file