Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Issue253 testexamplescoverage #315

Open
wants to merge 17 commits into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
81 changes: 80 additions & 1 deletion buildingspy/development/regressiontest.py
Original file line number Diff line number Diff line change
Expand Up @@ -340,6 +340,9 @@ def __init__(
self._data = []
self._reporter = rep.Reporter(os.path.join(os.getcwd(), "unitTests-{}.log".format(tool)))

# packages set by test single packages flag
self._packages = []

# By default, include export of FMUs.
self._include_fmu_test = True

Expand Down Expand Up @@ -757,11 +760,12 @@ def setSinglePackage(self, packageName):

# Set data dictionary as it may have been generated earlier for the whole library.
self._data = []

self._packages = []
for pac in packages:
pacSep = pac.find('.')
pacPat = pac[pacSep + 1:]
pacPat = pacPat.replace('.', os.sep)
self._packages.append(pacPat)
rooPat = os.path.join(self._libHome, 'Resources', 'Scripts', 'Dymola', pacPat)
# Verify that the directory indeed exists
if not os.path.isdir(rooPat):
Expand Down Expand Up @@ -3674,3 +3678,78 @@ def _analyseOMStats(self, lines=None, models=None, simulate=False):

print("\nMore detailed information is stored in self._omstats")
print(70 * '#')

def get_models_coverage(self):
    """
    Analyse how many examples are tested.

    Prints a summary of the regression-test coverage and returns the
    share of tested examples as a percentage in ``[0, 100]``.

    :return: Percentage of examples covered by the regression tests
             (``0.0`` if no examples were found).
    """
    # Populate the data dictionary if it has not been generated yet,
    # e.g. when this method is called before run().
    if self.get_number_of_tests() == 0:
        self.setDataDictionary(self._rootPackage)

    # Keep only entries that actually run a simulation or export an FMU;
    # other entries do not constitute a tested example. A separate list
    # is used so that self._data remains untouched.
    temp_data = [ele for ele in self._data
                 if ele['mustSimulate'] or ele['mustExportFMU']]

    # Determine the packages to scan: either those set via
    # setSinglePackage, or all top-level packages found in self._data
    # (dict.fromkeys deduplicates while preserving order).
    if self._packages:
        packs = self._packages
    else:
        packs = list(dict.fromkeys(
            pac['ScriptFile'].split(os.sep)[0] for pac in self._data))

    # Collect all candidate example files below the selected packages.
    total_examples = []
    for pac in packs:
        for dirpath, _, filenames in os.walk(
                os.path.join(self._libHome, pac)):
            for f in filenames:
                total_examples.append(os.path.abspath(
                    os.path.join(dirpath, f)))

    # Only files under Examples/Validation count, and package
    # definition files are not examples.
    total_examples = [
        i for i in total_examples
        if any(xs in i for xs in ('Examples', 'Validation'))
        and not i.endswith(('package.mo', '.order'))]

    # Guard against an empty package: without it the division below
    # would raise ZeroDivisionError.
    if total_examples:
        # Round after scaling to keep two decimals of the percentage.
        coverage = round(100.0 * len(temp_data) / len(total_examples), 2)
    else:
        coverage = 0.0

    print('***\n\nModel Coverage: ', str(int(coverage)) + '%')
    print(
        '***\n\nYou are testing : ',
        len(temp_data),
        ' out of ',
        len(total_examples),
        'total examples in ')
    for pac in packs:
        print(pac)
    print('\n')

    # Map each tested script (*.mos) to its model file name (*.mo) by
    # dropping the trailing 's' of the script extension.
    tested_model_names = [
        nam['ScriptFile'].split(os.sep)[-1][:-1] for nam in temp_data]

    missing_examples = [
        i for i in total_examples
        if not any(xs in i for xs in tested_model_names)]

    if missing_examples:
        print('***\n\nThe following examples are not tested\n')
        for i in missing_examples:
            print(i.split(self._libHome)[1])

    return coverage