diff --git a/.github/workflows/unittests.yml b/.github/workflows/unittests.yml index f1f98ed19..210fff86c 100644 --- a/.github/workflows/unittests.yml +++ b/.github/workflows/unittests.yml @@ -40,6 +40,8 @@ jobs: # Set run_tests to run only the specific tests you need to fix. - name: run_tests + env: + COLUMNS: 120 run: | ./test/run_tests diff --git a/lib/pavilion/builder.py b/lib/pavilion/builder.py index 0fa79ae76..f34ff37c0 100644 --- a/lib/pavilion/builder.py +++ b/lib/pavilion/builder.py @@ -460,7 +460,7 @@ def build(self, test_id: str, tracker: BuildTracker, directory into our test directory, and note that we've used the given build. - :param test_id: The test 'full_id' for the test initiating this build. + :param test_id: The test id for the test initiating this build. :param tracker: A thread-safe tracker object for keeping info on what the build is doing. :param cancel_event: Allows builds to tell each other @@ -570,7 +570,7 @@ def _build(self, build_dir, cancel_event, test_id, tracker: BuildTracker) -> boo :param Path build_dir: The directory in which to perform the build. :param threading.Event cancel_event: Event to signal that the build should stop. - :param test_id: The 'full_id' of the test initiating the build. + :param test_id: The ID of the test initiating the build. :param tracker: Build tracker for this build. :returns: True or False, depending on whether the build appears to have been successful. @@ -592,7 +592,8 @@ def _build(self, build_dir, cancel_event, test_id, tracker: BuildTracker) -> boo # Do the build, and wait for it to complete. with self.tmp_log_path.open('w') as build_log: # Build scripts take the test id as a first argument. - cmd = [self._script_path.as_posix(), test_id] + cmd = [self._script_path.as_posix(), str(test_id)] + proc = subprocess.Popen(cmd, cwd=build_dir.as_posix(), stdout=build_log, @@ -797,10 +798,6 @@ def _setup_build_dir(self, dest: Path, tracker: BuildTracker) -> None: raise TestBuilderError("Error extracting file '{}'\n {}" .format(src_path.as_posix(), extract_error)) - tracker.update( - state=STATES.BUILDING, - note="Generating dynamically created files.") - # Create build time file(s). 
for file, contents in self._config.get('create_files', {}).items(): try: diff --git a/lib/pavilion/cancel_utils.py b/lib/pavilion/cancel_utils.py index 880da0c83..ae19a2f3e 100644 --- a/lib/pavilion/cancel_utils.py +++ b/lib/pavilion/cancel_utils.py @@ -100,7 +100,7 @@ def cancel_tests(pav_cfg: PavConfig, tests: Iterable[TestRun], outfile: TextIO, title="Cancelling {} test{}".format(test_count, 's' if test_count > 1 else ''), outfile=outfile, fields=['name', 'id', 'state', 'series'], - rows=[{'name': test.name, 'id': test.full_id, + rows=[{'name': test.name, 'id': test.id, 'state': test.status.current().state, 'series': test.series} for test in cancelled_test_info]) else: diff --git a/lib/pavilion/cmd_utils.py b/lib/pavilion/cmd_utils.py index 10acb3727..d75bf4b93 100644 --- a/lib/pavilion/cmd_utils.py +++ b/lib/pavilion/cmd_utils.py @@ -1,14 +1,14 @@ """The module contains functions and classes that are generally useful across multiple commands.""" -import argparse import datetime as dt import io import logging import sys import time from pathlib import Path -from typing import List, TextIO, Union, Iterator, Optional +from argparse import Namespace +from typing import List, TextIO, Union, Iterator, Optional, Callable from collections import defaultdict from pavilion import config @@ -16,25 +16,26 @@ from pavilion import filters from pavilion import groups from pavilion import output -from pavilion import series from pavilion import sys_vars from pavilion import utils +from pavilion.series import TestSeries, SeriesInfo, list_series_tests, mk_series_info_transform +from pavilion.id_utils import load_user_series_id from pavilion.errors import TestRunError, CommandError, TestSeriesError, \ PavilionError, TestGroupError from pavilion.test_run import TestRun, load_tests, TestAttributes -from pavilion.test_ids import TestID, SeriesID +from pavilion.test_ids import TestID, SeriesID, ID from pavilion.types import ID_Pair from pavilion.micro import flatten LOGGER = logging.getLogger(__name__) -def load_last_series(pav_cfg, errfile: TextIO) -> Union[series.TestSeries, None]: +def load_last_series(pav_cfg: config.PavConfig, errfile: TextIO) -> Optional[TestSeries]: """Load the series object for the last series run by this user on this system.""" try: - series_id = series.load_user_series_id(pav_cfg) - except series.TestSeriesError as err: + series_id = load_user_series_id(pav_cfg) + except TestSeriesError as err: output.fprint(errfile, "Failed to find last series: {}".format(err.args[0])) return None @@ -43,73 +44,55 @@ def load_last_series(pav_cfg, errfile: TextIO) -> Union[series.TestSeries, None] return None try: - return series.TestSeries.load(pav_cfg, series_id.id_str) - except series.TestSeriesError as err: + return TestSeries.load(pav_cfg, series_id) + except TestSeriesError as err: output.fprint(errfile, "Failed to load last series: {}".format(err.args[0])) return None - -def set_arg_defaults(args): - """Set typical argument defaults, but don't override any given.""" - - # Don't assume these actually exist. 
- def_filter = make_filter_query() - args.filter = getattr(args, 'filter', def_filter) - - -def arg_filtered_tests(pav_cfg: "PavConfig", args: argparse.Namespace, - verbose: TextIO = None) -> dir_db.SelectItems: - """Search for test runs that match based on the argument values in args, +def arg_filtered_tests(pav_cfg: config.PavConfig, + tests: List[TestID], + series: List[SeriesID], + filter_query: Optional[str] = None, + sort_by: Optional[str] = None, + limit: Optional[int] = None, + verbose: Optional[TextIO] = None) -> dir_db.SelectItems: + """Search for test runs that match based on the specified tests and series IDs, and return a list of matching test id's. - Note: I know this violates the idea that we shouldn't be passing a - generic object around and just using random bits of an undefined interface. - BUT: - - 1. The interface is well defined, by `filters.add_test_filter_args`. - 2. All of the used bits are *ALWAYS* used, so any errors will pop up - immediately in unit tests. - - TODO: Rewrite the interface so that it's cleaner and not coupled to argparse. - HW - :param pav_cfg: The Pavilion config. - :param args: An argument namespace with args defined by - `filters.add_test_filter_args`, plus one additional `tests` argument - that should contain a list of test id's, series id's, or the 'last' - or 'all' keyword. Last implies the last test series run by the current user - on this system (and is the default if no tests are given. 'all' means all tests. + :param tests: A list of test IDs on which to filter. + :param series: A list of series IDs whose tests should be filtered. + :param limit: The maximum number of test runs to return. + :param sort_by: The field on which to sort. + :param filter_query: The query to use when filtering tests. :param verbose: A file like object to report test search status. :return: A list of test paths. 
""" - limit = getattr(args, 'limit', filters.TEST_FILTER_DEFAULTS['limit']) verbose = verbose or io.StringIO() - sys_name = getattr(args, 'sys_name', sys_vars.get_vars(defer=True).get('sys_name')) - sort_by = getattr(args, 'sort_by', 'created') + sort_by = sort_by or "-created" - has_filter_defaults = False + use_default_filter = True - for arg, default in filters.TEST_FILTER_DEFAULTS.items(): - if hasattr(args, arg) and default != getattr(args, arg): - has_filter_defaults = True - break + if sort_by != "-created" or limit is not None or filter_query is not None: + use_default_filter = False - if SeriesID("all") in args.tests and args.filter is not None and not has_filter_defaults: + if SeriesID("all") in series and use_default_filter: output.fprint(verbose, "Using default search filters: The current system, user, and " "created less than 1 day ago.", color=output.CYAN) - args.filter = make_filter_query() + filter_query = make_filter_query() - if args.filter is None: + if filter_query is None: filter_func = filters.const(True) # Always return True else: try: - filter_func = filters.parse_query(args.filter) + filter_func = filters.parse_query(filter_query) except filters.FilterParseError: - raise PavilionError(f"Invalid syntax in filter query: {args.filter}") + raise PavilionError(f"Invalid syntax in filter query: {filter_query}") order_func, order_asc = filters.get_sort_opts(sort_by, "TEST") - if SeriesID("all") in args.tests: + if SeriesID("all") in series: tests = dir_db.SelectItems([], []) working_dirs = set(map(lambda cfg: cfg['working_dir'], pav_cfg.configs.values())) @@ -130,7 +113,19 @@ def arg_filtered_tests(pav_cfg: "PavConfig", args: argparse.Namespace, return tests - test_paths = test_list_to_paths(pav_cfg, args.tests, verbose) + test_paths = test_list_to_paths(pav_cfg, tests, verbose) + + for sid in series: + if sid.last(): + sid_ = load_user_series_id(pav_cfg, errfile=verbose) + + if sid_ is None: + output.fprint(verbose, "No last series found.") + continue + else: + sid_ = sid + + test_paths.extend(map(lambda x: x.resolve(), list_series_tests(pav_cfg, sid_))) return dir_db.select_from( pav_cfg, @@ -142,7 +137,6 @@ def arg_filtered_tests(pav_cfg: "PavConfig", args: argparse.Namespace, limit=limit ) - def make_filter_query() -> str: """Construct the default filter query, which targets tests created by the current user on the current system more recently than 1 day ago.""" @@ -161,28 +155,34 @@ def make_filter_query() -> str: return template.format(*fargs) -def arg_filtered_series(pav_cfg: config.PavConfig, args: argparse.Namespace, - verbose: TextIO = None) -> List[series.SeriesInfo]: +def arg_filtered_series(pav_cfg: config.PavConfig, + series: List[SeriesID], + filter_query: Optional[str] = None, + sort_by: Optional[str] = None, + limit: Optional[int] = None, + verbose: Optional[TextIO] = None) -> List[SeriesInfo]: """Return a list of SeriesInfo objects based on the args.series attribute. When args.series is empty, default to the 'last' series started by the user on this system. 
If 'all' is given, search all series (with a default current user/system/1-day filter) and additonally filtered by args attributes provied via filters.add_series_filter_args().""" - limit = getattr(args, 'limit', filters.SERIES_FILTER_DEFAULTS['limit']) verbose = verbose or io.StringIO() + sort_by = sort_by or "-status_when" - if SeriesID('all') in args.series: - for arg, default in filters.SERIES_FILTER_DEFAULTS.items(): - if hasattr(args, arg) and default != getattr(args, arg): - break - else: - output.fprint(verbose, "Using default search filters: The current system, user, and " - "created less than 1 day ago.", color=output.CYAN) - args.filter = make_filter_query() + use_default_filter = True + + if sort_by != "-status_when" or limit is not None or filter_query is not None: + use_default_filter = False + + if SeriesID("all") in series and use_default_filter: + output.fprint(verbose, "Using default search filters: The current system, user, and " + "created less than 1 day ago.", color=output.CYAN) + filter_query = make_filter_query() seen_sids = [] found_series = [] - for sid in args.series: + + for sid in series: # Go through each provided sid (including last and all) and find all # matching series. Then only add them if we haven't seen them yet. if sid.last(): @@ -193,22 +193,21 @@ def arg_filtered_series(pav_cfg: config.PavConfig, args: argparse.Namespace, found_series.append(last_series.info()) elif sid.all(): - sort_by = getattr(args, 'sort_by', filters.SERIES_FILTER_DEFAULTS['sort_by']) order_func, order_asc = filters.get_sort_opts(sort_by, 'SERIES') - if args.filter is None: + if filter_query is None: filter_func = filters.const(True) # Always return True else: try: - filter_func = filters.parse_query(args.filter) + filter_func = filters.parse_query(filter_query) except filters.FilterParseError: - raise PavilionError(f"Invalid syntax in filter query: {args.filter}") + raise PavilionError(f"Invalid syntax in filter query: {filter_query}") found_series = dir_db.select( pav_cfg=pav_cfg, id_dir=pav_cfg.working_dir/'series', filter_func=filter_func, - transform=series.mk_series_info_transform(pav_cfg), + transform=mk_series_info_transform(pav_cfg), order_func=order_func, order_asc=order_asc, use_index=False, @@ -216,18 +215,18 @@ def arg_filtered_series(pav_cfg: config.PavConfig, args: argparse.Namespace, limit=limit, ).data else: - found_series.append(series.SeriesInfo.load(pav_cfg, sid.id_str)) + found_series.append(SeriesInfo.load(pav_cfg, sid)) matching_series = [] for sinfo in found_series: - if sinfo.sid not in seen_sids: + if sinfo.id not in seen_sids: matching_series.append(sinfo) - seen_sids.append(sinfo.sid) + seen_sids.append(sinfo.id) return matching_series -def read_test_files(pav_cfg, files: List[str]) -> List[str]: +def read_test_files(pav_cfg: config.PavConfig, files: List[str]) -> List[str]: """Read the given files which contain a list of tests (removing comments) and return a list of test names.""" @@ -260,7 +259,7 @@ def read_test_files(pav_cfg, files: List[str]) -> List[str]: return tests -def get_collection_path(pav_cfg, collection) -> Union[Path, None]: +def get_collection_path(pav_cfg: config.PavConfig, collection: str) -> Optional[Path]: """Find a collection in one of the config directories. 
Returns None on failure.""" # Check if this collection exists in one of the defined config dirs @@ -273,8 +272,9 @@ def get_collection_path(pav_cfg, collection) -> Union[Path, None]: return None -def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]: - """Given a list of raw test id's and series id's, return a list of paths +def test_list_to_paths(pav_cfg: config.PavConfig, req_tests: List[Union[ID]], + errfile: Optional[TextIO] = None) -> List[Path]: + """Given a list of test id's and series id's, return a list of paths to those tests. The keyword 'last' may also be given to get the last series run by the current user on the current machine. @@ -282,7 +282,7 @@ def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]: :param pav_cfg: The Pavilion config. :param req_tests: A list of test id's, series id's, or 'last'. :param errfile: An option output file for printing errors. - :return: A list of test id's. + :return: A list of test paths. """ if errfile is None: @@ -292,7 +292,7 @@ def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]: for raw_id in req_tests: if isinstance(raw_id, SeriesID) and raw_id.last(): - raw_id = series.load_user_series_id(pav_cfg, errfile) + raw_id = load_user_series_id(pav_cfg, errfile) if raw_id is None: output.fprint(errfile, "User has no 'last' series for this machine.", color=output.YELLOW) @@ -300,7 +300,7 @@ def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]: if isinstance(raw_id, TestID): try: - test_wd, _id = TestRun.parse_raw_id(pav_cfg, raw_id.id_str) + test_wd, _id = TestRun.parse_raw_id(pav_cfg, raw_id) except TestRunError as err: output.fprint(errfile, err, color=output.YELLOW) continue @@ -314,14 +314,14 @@ def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]: elif isinstance(raw_id, SeriesID): try: test_paths.extend( - series.list_series_tests(pav_cfg, raw_id.id_str)) + list_series_tests(pav_cfg, raw_id)) except TestSeriesError: output.fprint(errfile, "Invalid series id '{}'".format(raw_id), color=output.YELLOW) else: # A group try: - group = groups.TestGroup(pav_cfg, raw_id.id_str) + group = groups.TestGroup(pav_cfg, raw_id) except TestGroupError as err: output.fprint( errfile, @@ -346,29 +346,21 @@ def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]: return test_paths -def _filter_tests_by_raw_id(pav_cfg, id_pairs: List[ID_Pair], +def _filter_tests_by_raw_id(pav_cfg: config.PavConfig, id_pairs: List[ID_Pair], exclude_ids: List[TestID]) -> List[ID_Pair]: """Filter the given tests by raw id.""" exclude_pairs = [] - for raw_id in exclude_ids: - label = raw_id.label - ex_id = raw_id.test_num + ex_wd = Path(pav_cfg.get("working_dir")) - ex_wd = pav_cfg['configs'].get(label, None) - if ex_wd is None: - # Invalid label. - continue - - ex_wd = Path(ex_wd) - exclude_pairs.append((ex_wd, ex_id)) + exclude_pairs = [ID_Pair(ex_wd, id) for id in exclude_ids] return [pair for pair in id_pairs if pair not in exclude_pairs] -def get_tests_by_paths(pav_cfg, test_paths: List[Path], errfile: TextIO, - exclude_ids: List[str] = None) -> List[TestRun]: +def get_tests_by_paths(pav_cfg: config.PavConfig, test_paths: List[Path], errfile: TextIO, + exclude_ids: List[TestID] = None) -> List[TestRun]: """Given a list of paths to test run directories, return the corresponding list of tests. 
@@ -387,12 +379,7 @@ def get_tests_by_paths(pav_cfg, test_paths: List[Path], errfile: TextIO, test_path = test_path.resolve() test_wd = test_path.parents[1] - try: - test_id = int(test_path.name) - except ValueError: - output.fprint(errfile, "Invalid test id '{}' from test path '{}'" .format(test_path.name, test_path), color=output.YELLOW) - continue + test_id = TestID(test_path.name) test_pairs.append(ID_Pair((test_wd, test_id))) @@ -402,8 +389,8 @@ def get_tests_by_paths(pav_cfg, test_paths: List[Path], errfile: TextIO, return load_tests(pav_cfg, test_pairs, errfile) -def get_tests_by_id(pav_cfg, test_ids: List[Union[TestID, SeriesID]], errfile: TextIO, - exclude_ids: List[TestID] = None) -> List[TestRun]: +def get_tests_by_id(pav_cfg: config.PavConfig, test_ids: List[Union[TestID, SeriesID]], + errfile: TextIO, exclude_ids: Optional[List[TestID]] = None) -> List[TestRun]: """Convert a list of raw test id's and series id's into a list of test objects. @@ -423,7 +410,7 @@ def get_tests_by_id(pav_cfg, test_ids: List[Union[TestID, SeriesID]], errfile: T if raw_id.last(): series_obj = load_last_series(pav_cfg, errfile) else: - series_obj = series.TestSeries.load(pav_cfg, raw_id.id_str) + series_obj = TestSeries.load(pav_cfg, raw_id) except TestSeriesError as err: output.fprint(errfile, "Suite {} could not be found.\n{}" .format(raw_id, err), color=output.RED) @@ -433,7 +420,7 @@ def get_tests_by_id(pav_cfg, test_ids: List[Union[TestID, SeriesID]], errfile: T # Just a plain test id. else: try: - test_id_pairs.append(TestRun.parse_raw_id(pav_cfg, raw_id.id_str)) + test_id_pairs.append(TestRun.parse_raw_id(pav_cfg, raw_id)) except TestRunError as err: output.fprint(sys.stdout, "Error loading test '{}': {}" @@ -444,7 +431,7 @@ def get_tests_by_id(pav_cfg, test_ids: List[Union[TestID, SeriesID]], errfile: T return load_tests(pav_cfg, test_id_pairs, errfile) -def get_testset_name(pav_cfg, tests: List['str'], files: List['str']): +def get_testset_name(pav_cfg: config.PavConfig, tests: List[str], files: List[str]) -> str: """Generate the name for the test set based on the test input to the run command. """ # Expected Behavior: @@ -514,7 +501,7 @@ def get_glob(test_suite_name, test_names): return testset_name -def get_last_test_id(pav_cfg: "PavConfig", errfile: TextIO) -> Optional[TestID]: +def get_last_test_id(pav_cfg: config.PavConfig, errfile: TextIO) -> Optional[TestID]: """Get the ID of the last run test, if it exists, and if there is a single unambiguous last test. If there is not, return None.""" @@ -523,7 +510,7 @@ def get_last_test_id(pav_cfg: "PavConfig", errfile: TextIO) -> Optional[TestID]: if last_series is None: return None - test_ids = list(last_series.tests.keys()) + id_pairs = list(last_series.tests.keys()) - if len(test_ids) == 0: + if len(id_pairs) == 0: output.fprint( @@ -537,4 +524,4 @@ def get_last_test_id(pav_cfg: "PavConfig", errfile: TextIO) -> Optional[TestID]: f"Multiple tests exist in last series.
Could not unambiguously identify last test.") return None - return TestID(test_ids[0]) + return id_pairs[0][1] diff --git a/lib/pavilion/commands/_log_results.py b/lib/pavilion/commands/_log_results.py index 9569e5852..f58511502 100644 --- a/lib/pavilion/commands/_log_results.py +++ b/lib/pavilion/commands/_log_results.py @@ -5,6 +5,7 @@ from pavilion import output from pavilion import series +from pavilion.test_ids import SeriesID from pavilion.result_logging import get_result_loggers from pavilion.errors import TestSeriesError from .base_classes import Command @@ -23,7 +24,7 @@ def _setup_arguments(self, parser: "ArgParser") -> None: """Sets up arguments for _log_series command. Only needs series ID.""" parser.add_argument( - 'series_id', action='store', + 'series_id', type=SeriesID, action='store', help="Series ID." ) diff --git a/lib/pavilion/commands/_run.py b/lib/pavilion/commands/_run.py index 597192886..a2e063566 100644 --- a/lib/pavilion/commands/_run.py +++ b/lib/pavilion/commands/_run.py @@ -17,6 +17,7 @@ from pavilion.sys_vars import base_classes from pavilion.test_run import TestRun, mass_status_update from pavilion.variables import VariableSetManager +from pavilion.test_ids import TestID from .base_classes import Command # We need to catch pretty much all exceptions to cleanly report errors. @@ -34,7 +35,7 @@ def __init__(self): def _setup_arguments(self, parser): parser.add_argument( - 'test_ids', action='store', nargs='+', + 'test_ids', type=TestID, action='store', nargs='+', help='The full id of the test to run.') def run(self, pav_cfg, args): @@ -64,7 +65,7 @@ def run(self, pav_cfg, args): try: self._finalize_test(pav_cfg, test) except PavilionError as err: - fprint(self.outfile, "Error finalizing test run '{}'".format(test.full_id)) + fprint(self.outfile, "Error finalizing test run '{}'".format(test.id)) fprint(self.outfile, err.pformat()) test.status.set(STATES.RUN_ERROR, "Error finalizing test: {}".format(err)) test.set_run_complete() @@ -87,7 +88,7 @@ def run(self, pav_cfg, args): test.status.set(STATES.BUILDING, "Test building on an allocation.") if not test.build(): test.set_run_complete() - fprint(self.outfile, "Test {} build failed.".format(test.full_id)) + fprint(self.outfile, "Test {} build failed.".format(test.id)) continue if not test.build_only: built_tests.append(test) @@ -130,7 +131,7 @@ def _finalize_test(self, pav_cfg: PavConfig, test: TestRun): "Error resolving scheduler variables at run time. " "See'pav log kickoff {}' for the full error.".format(test.id)) raise TestRunError("Error resolving scheduler variables for test {}.\n" - .format(test.full_id) + .format(test.id) + '\n'.join(var_man.get('sched.errors.*'))) try: @@ -141,7 +142,7 @@ def _finalize_test(self, pav_cfg: PavConfig, test: TestRun): "Unexpected error finalizing test\n{}\n" "See 'pav log kickoff {}' for the full error." 
.format(err, test.id)) - raise TestRunError("Could not finalize test '{}'.".format(test.full_id), prior_err=err) + raise TestRunError("Could not finalize test '{}'.".format(test.id), prior_err=err) def _run_tests(self, pav_cfg, tests): @@ -150,7 +151,7 @@ def _run_tests(self, pav_cfg, tests): # Turn this into a stack tests.reverse() - # Track our running tests by full_id + # Track our running tests by ID running_tests : Dict[str, Tuple[threading.Thread, TestRun]] = {} while tests or running_tests: added_thread = False @@ -165,7 +166,7 @@ def _run_tests(self, pav_cfg, tests): conc_limit = min([test.concurrent for test in next_tests]) if len(running_tests) + 1 <= conc_limit: thread = threading.Thread(target=self._run, args=(next_test,)) - running_tests[next_test.full_id] = (thread, next_test) + running_tests[next_test.id] = (thread, next_test) thread.start() added_thread = True else: @@ -181,7 +182,7 @@ def _run_tests(self, pav_cfg, tests): thread.join() thread_exited = True test.set_run_complete() - del running_tests[test.full_id] + del running_tests[test.id] if not thread_exited: time.sleep(0.5) @@ -244,7 +245,7 @@ def _run(self, test: TestRun): # Some other unexpected exception. test.status.set( STATES.RUN_ERROR, - "Unknown error while running test. Refer to the kickoff log.") + f"Unknown error while running test. Refer to the kickoff log.") return try: diff --git a/lib/pavilion/commands/_series.py b/lib/pavilion/commands/_series.py index 1dc6c6981..baf25d87f 100644 --- a/lib/pavilion/commands/_series.py +++ b/lib/pavilion/commands/_series.py @@ -6,6 +6,7 @@ from pavilion import output from pavilion import series from pavilion.errors import TestSeriesError +from pavilion.test_ids import SeriesID from .base_classes import Command @@ -22,7 +23,7 @@ def _setup_arguments(self, parser): """Sets up arguments for _series command. Only needs series ID.""" parser.add_argument( - 'series_id', action='store', + 'series_id', type=SeriesID, action='store', help="Series ID." 
) diff --git a/lib/pavilion/commands/cancel.py b/lib/pavilion/commands/cancel.py index b9b6a3817..478a14e93 100644 --- a/lib/pavilion/commands/cancel.py +++ b/lib/pavilion/commands/cancel.py @@ -8,7 +8,7 @@ from pavilion import cmd_utils from pavilion import filters from pavilion import output -from pavilion import series +from pavilion.series import TestSeries from pavilion.errors import TestSeriesError from pavilion.test_run import TestRun from pavilion.config import PavConfig @@ -47,23 +47,28 @@ def run(self, pav_cfg: PavConfig, args: Namespace) -> int: """Cancel the given tests or series.""" ids = resolve_mixed_ids(args.tests, auto_last=True) - - # Separate out into tests and series - series_ids, test_ids = partition(lambda x: isinstance(x, SeriesID), ids) - - args.tests = list(test_ids) - args.series = list(series_ids) + tests = ids["tests"] + series = ids["series"] test_ret = 0 sers_ret = 0 - if len(args.tests) > 0: - test_paths = cmd_utils.arg_filtered_tests(pav_cfg, args, verbose=self.errfile).paths + if len(tests) > 0: + test_paths = cmd_utils.arg_filtered_tests( + pav_cfg, + tests, + series, + filter_query=args.filter, + limit=args.limit, + verbose=self.errfile).paths tests = cmd_utils.get_tests_by_paths(pav_cfg, test_paths, errfile=self.errfile) test_ret = cancel_utils.cancel_tests(pav_cfg, tests, self.outfile) - if len(args.series) > 0: - sinfos = cmd_utils.arg_filtered_series(pav_cfg, args, verbose=self.errfile) - test_series = list(map(lambda x: series.TestSeries.load(pav_cfg, x.sid), sinfos)) + if len(series) > 0: + sinfos = cmd_utils.arg_filtered_series( + pav_cfg, + series, + verbose=self.errfile) + test_series = list(map(lambda x: TestSeries.load(pav_cfg, x.id), sinfos)) sers_ret = cancel_utils.cancel_series(test_series, self.outfile) return test_ret or sers_ret diff --git a/lib/pavilion/commands/cat.py b/lib/pavilion/commands/cat.py index 7d9449971..fe6366aaa 100644 --- a/lib/pavilion/commands/cat.py +++ b/lib/pavilion/commands/cat.py @@ -23,7 +23,7 @@ def __init__(self): def _setup_arguments(self, parser): parser.add_argument( - 'test_id', help="test id", + 'test_id', type=TestID, help="test id", nargs='?', default=None, metavar='TEST_ID' ) @@ -43,11 +43,8 @@ def run(self, pav_cfg, args): if test_id is None: output.fprint(self.errfile, "No last test found.", color=output.RED) return 1 - elif TestID.is_valid_id(args.test_id): - test_id = TestID(args.test_id) else: - output.fprint(self.errfile, f"{args.test_id} is not a valid test ID.") - return errno.EEXIST + test_id = args.test_id tests = cmd_utils.get_tests_by_id(pav_cfg, [test_id], self.errfile) if not tests: @@ -56,7 +53,7 @@ def run(self, pav_cfg, args): elif len(tests) > 1: output.fprint( self.errfile, "Matched multiple tests. Printing file contents for first " - "test only (test {})".format(tests[0].full_id), + "test only (test {})".format(tests[0].id), color=output.YELLOW) test = tests[0] @@ -67,7 +64,7 @@ def run(self, pav_cfg, args): if not test.path/args.file: output.fprint(sys.stderr, "File {} does not exist for test {}." 
- .format(args.file, test.full_id)) + .format(args.file, test.id)) return errno.EEXIST return self.print_file(test.path / args.file) diff --git a/lib/pavilion/commands/graph.py b/lib/pavilion/commands/graph.py index 55a7055f4..92949f864 100644 --- a/lib/pavilion/commands/graph.py +++ b/lib/pavilion/commands/graph.py @@ -82,7 +82,7 @@ def _setup_arguments(self, parser): filters.add_test_filter_args(parser) parser.add_argument( - 'tests', nargs='*', default=[], action='store', + 'tests', nargs='*', type=TestID, default=[], action='store', help='Specific Test Ids to graph. ' ) parser.add_argument( @@ -90,7 +90,7 @@ def _setup_arguments(self, parser): help='Desired name of graph when saved to PNG.' ) parser.add_argument( - '--exclude', default=[], action='append', + '--exclude', type=TestID, default=[], action='append', help='Exclude specific Test Ids from the graph.' ) parser.add_argument( @@ -140,10 +140,15 @@ def run(self, pav_cfg, args): output.fprint(self.outfile, "Generating Graph...") - args.tests = resolve_mixed_ids(args.tests, auto_last=True) - # Get filtered Test IDs. - test_paths = cmd_utils.arg_filtered_tests(pav_cfg, args, verbose=self.errfile).paths + test_paths = cmd_utils.arg_filtered_tests( + pav_cfg, + args.tests, + [], + filter_query=args.filter, + sort_by=args.sort_by, + limit=args.limit, + verbose=self.errfile).paths # Load TestRun for all tests, skip those that are to be excluded. tests = cmd_utils.get_tests_by_paths( diff --git a/lib/pavilion/commands/group.py b/lib/pavilion/commands/group.py index dc4b04934..8d8059ba0 100644 --- a/lib/pavilion/commands/group.py +++ b/lib/pavilion/commands/group.py @@ -8,8 +8,11 @@ from pavilion import output from pavilion.output import fprint, draw_table from pavilion.enums import Verbose +from pavilion.test_run import TestRun +from pavilion.series import TestSeries from pavilion.groups import TestGroup from pavilion.errors import TestGroupError +from pavilion.test_ids import GroupID, resolve_mixed_ids from .base_classes import Command, sub_cmd @@ -50,6 +53,7 @@ def _setup_arguments(self, parser): add_p.add_argument( 'group', + type=GroupID, help="The group to add to.") add_p.add_argument( 'items', nargs='+', @@ -64,7 +68,7 @@ def _setup_arguments(self, parser): description="Remove all given ID's (test/series/group) from the group.") remove_p.add_argument( - 'group', help="The group to remove items from.") + 'group', type=GroupID, help="The group to remove items from.") remove_p.add_argument( 'items', nargs='+', help="Test run, test series, and group ID's to remove, as per `pav group add`.") @@ -73,15 +77,15 @@ def _setup_arguments(self, parser): 'delete', help="Delete the given group entirely.") delete_p.add_argument( - 'group', help="The group to delete.") + 'group', type=GroupID, help="The group to delete.") rename_p = subparsers.add_parser( 'rename', help="Rename a group.") rename_p.add_argument( - 'group', help="The group to rename.") + 'group', type=GroupID, help="The group to rename.") rename_p.add_argument( - 'new_name', help="The new name for the group") + 'new_name', type=GroupID, help="The new name for the group") rename_p.add_argument( '--no-redirect', action='store_true', default=False, help="By default, groups that point to this group are redirected to the new name. " @@ -103,7 +107,7 @@ def _setup_arguments(self, parser): "include those attached indirectly through series. 
To see all tests " "in a group, use `pav status`.") member_p.add_argument( - 'group', help="The group to list.") + 'group', type=GroupID, help="The group to list.") member_p.add_argument( '--recursive', '-r', action='store_true', default=False, help="Recursively list members of child groups as well.") @@ -122,7 +126,7 @@ def run(self, pav_cfg, args): return self._run_sub_command(pav_cfg, args) - def _get_group(self, pav_cfg, group_name: str) -> TestGroup: + def _get_group(self, pav_cfg, group_name: GroupID) -> TestGroup: """Get the requested group, and print a standard error message on failure.""" try: @@ -154,19 +158,23 @@ def _add_cmd(self, pav_cfg, args): fprint(self.errfile, err.pformat()) return 1 - added, errors = group.add(args.items) + ids = resolve_mixed_ids(args.items) + items = ids["tests"] + ids["series"] + ids["groups"] + + added, errors = group.add(items) if errors: fprint(self.errfile, "There were one or more errors when adding tests.", color=output.RED) for error in errors: fprint(self.errfile, error.pformat(), '\n') - existed = len(args.items) - len(added) - len(errors) + existed = len(items) - len(added) - len(errors) fprint(self.outfile, "Added {} item{} to the group ({} already existed)." .format(len(added), '' if len(added) == 1 else 's', existed)) if errors: + # import pdb; pdb.set_trace() return 1 else: return 0 @@ -179,7 +187,10 @@ def _remove_cmd(self, pav_cfg, args): if group is None: return 1 - removed, errors = group.remove(args.items) + ids = resolve_mixed_ids(args.items) + items = ids["tests"] + ids["series"] + ids["groups"] + + removed, errors = group.remove(items) if errors: fprint(self.errfile, "There were one or more errors when removing tests.", color=output.RED) @@ -244,7 +255,7 @@ def _list_cmd(self, pav_cfg, args): else: continue - group = TestGroup(pav_cfg, group_dir.name) + group = TestGroup(pav_cfg, GroupID(group_dir.name)) groups_info.append(group.info()) groups_info.sort(key=lambda v: v['created'], reverse=True) @@ -281,11 +292,11 @@ def _members_cmd(self, pav_cfg, args): filtered_members = [] for mem in members: - if show_tests and mem['itype'] == TestGroup.TEST_ITYPE: + if show_tests and mem['itype'] == TestRun: filtered_members.append(mem) - elif show_series and mem['itype'] == TestGroup.SERIES_ITYPE: + elif show_series and mem['itype'] == TestSeries: filtered_members.append(mem) - elif show_groups and mem['itype'] == TestGroup.GROUP_ITYPE: + elif show_groups and mem['itype'] == TestGroup: filtered_members.append(mem) members = filtered_members @@ -294,13 +305,22 @@ def _members_cmd(self, pav_cfg, args): if args.recursive: fields.insert(0, 'group') + def type_transform(type: type) -> str: + if type == TestRun: + return "test" + elif type == TestSeries: + return "series" + else: + return "group" + draw_table( self.outfile, rows=members, fields=fields, field_info={ - 'itype': {'title': 'type'}, - 'created': {'transform': output.get_relative_timestamp} + 'itype': {'title': 'type', 'transform': type_transform}, + 'created': {'transform': output.get_relative_timestamp}, + 'id': {'transform': str} }) return 0 diff --git a/lib/pavilion/commands/list_cmd.py b/lib/pavilion/commands/list_cmd.py index 3cca592e2..b0a75cea6 100644 --- a/lib/pavilion/commands/list_cmd.py +++ b/lib/pavilion/commands/list_cmd.py @@ -2,15 +2,17 @@ undefined) bits.""" import errno +from argparse import Namespace from typing import List, Dict, Mapping from pavilion import arguments from pavilion import cmd_utils from pavilion import filters from pavilion import output +from 
pavilion.config import PavConfig from pavilion.series.info import SeriesInfo from pavilion.test_run import TestAttributes -from pavilion.test_ids import resolve_mixed_ids, SeriesID +from pavilion.test_ids import resolve_mixed_ids, SeriesID, TestID from .base_classes import Command, sub_cmd @@ -122,7 +124,7 @@ def _setup_arguments(self, parser): ) series_p.add_argument( - 'series', nargs="*", default=['all'], + 'series', nargs="*", type=SeriesID, default=[SeriesID("all")], help="Specific series to filter from. Defaults to 'all'" ) @@ -230,9 +232,18 @@ def _test_runs_cmd(self, pav_cfg, args): avail_fields=TestAttributes.list_attrs() ) - args.tests = resolve_mixed_ids(args.tests, auto_last=True) + ids = resolve_mixed_ids(args.tests, auto_last=True) + tests = ids["tests"] + series = ids["series"] - test_runs = cmd_utils.arg_filtered_tests(pav_cfg, args, verbose=self.errfile).data + test_runs = cmd_utils.arg_filtered_tests( + pav_cfg, + tests, + series, + filter_query=args.filter, + sort_by=args.sort_by, + limit=args.limit, + verbose=self.errfile).data def remove_nones(run: Mapping) -> Dict: return { k: v for k, v in run.items() if v not in [None, ''] } @@ -251,7 +262,7 @@ def remove_nones(run: Mapping) -> Dict: SERIES_LONG_FIELDS = ['id', 'user', 'created', 'num_tests'] @sub_cmd() - def _series_cmd(self, pav_cfg, args): + def _series_cmd(self, pav_cfg: PavConfig, args: Namespace) -> int: """Print info on each series.""" series_attrs = { @@ -265,14 +276,18 @@ def _series_cmd(self, pav_cfg, args): fields, mode = self.get_fields( fields_arg=args.out_fields, mode_arg=args.output_mode, - default_single_field='sid', + default_single_field='id', default_fields=self.SERIES_LONG_FIELDS, avail_fields=list(series_attrs.keys()), ) - args.series = resolve_mixed_ids(args.series, auto_last=True) - - series = cmd_utils.arg_filtered_series(pav_cfg, args, verbose=self.errfile) + series = cmd_utils.arg_filtered_series( + pav_cfg, + args.series, + filter_query=args.filter, + sort_by=args.sort_by, + limit=args.limit, + verbose=self.errfile) series = [series_info.attr_dict() for series_info in series] self.write_output( @@ -283,3 +298,5 @@ def _series_cmd(self, pav_cfg, args): vsep=args.vsep, wrap=args.wrap, ) + + return 0 diff --git a/lib/pavilion/commands/log.py b/lib/pavilion/commands/log.py index b24b4b671..7d9d3f374 100644 --- a/lib/pavilion/commands/log.py +++ b/lib/pavilion/commands/log.py @@ -11,6 +11,7 @@ from pavilion import series, series_config from pavilion import cmd_utils from pavilion.test_run import TestRun +from pavilion.test_ids import TestID, SeriesID from .base_classes import Command @@ -146,11 +147,20 @@ def run(self, pav_cfg, args): else: cmd_name = args.log_cmd - if cmd_name == 'states': + if hasattr(args, "id"): if args.id is None: + args.id = SeriesID("last") + else: + if TestID.is_valid_id(args.id): + args.id = TestID(args.id) + else: + args.id = SeriesID(args.id) + + if cmd_name == 'states': + if args.id == SeriesID("last"): args.id = cmd_utils.get_last_test_id(pav_cfg, self.errfile) if args.id is None: output.fprint(self.errfile, "No last test found.", color=output.RED) return 1 @@ -165,7 +175,7 @@ def run(self, pav_cfg, args): else: try: if cmd_name == 'series': - if args.id is None: + if args.id == SeriesID("last"): test = cmd_utils.load_last_series(pav_cfg, self.errfile) if test is None: @@ -174,7 +184,7 @@ def run(self, pav_cfg, args): else: test = series.TestSeries.load(pav_cfg, args.id) else: - if args.id is None: + if args.id ==
SeriesID("last"): args.id = cmd_utils.get_last_test_id(pav_cfg, self.errfile) if args.id is None: @@ -242,7 +252,7 @@ def run(self, pav_cfg, args): break return 0 - def _states(self, pav_cfg, test_id: str, raw: bool = False, raw_time: bool = False): + def _states(self, pav_cfg, test_id: TestID, raw: bool = False, raw_time: bool = False): """Print the states for a test.""" try: diff --git a/lib/pavilion/commands/ls.py b/lib/pavilion/commands/ls.py index 527f9b9f0..5cd93f474 100644 --- a/lib/pavilion/commands/ls.py +++ b/lib/pavilion/commands/ls.py @@ -30,6 +30,7 @@ def _setup_arguments(self, parser): parser.add_argument( 'test_id', nargs='?', + type=TestID, default=None, help="Test id number.", metavar='TEST_ID', @@ -87,20 +88,14 @@ def run(self, pav_cfg, args): output.fprint(self.errfile, "No last test found.", color=output.RED) return errno.EEXIST - elif TestID.is_valid_id(args.test_id): - test_id = TestID(args.test_id) - else: - output.fprint(self.errfile, f"{args.test_id} is not a valid test ID.") - return errno.EEXIST - - tests = cmd_utils.get_tests_by_id(pav_cfg, [test_id], self.errfile) + tests = cmd_utils.get_tests_by_id(pav_cfg, [args.test_id], self.errfile) if not tests: output.fprint(self.errfile, "Could not find test '{}'".format(test_id)) return errno.EEXIST elif len(tests) > 1: output.fprint( self.errfile, "Matched multiple tests. Listing files for first " - "test only (test {})".format(tests[0].full_id), + "test only (test {})".format(tests[0].id), color=output.YELLOW) test = tests[0] diff --git a/lib/pavilion/commands/result.py b/lib/pavilion/commands/result.py index e46f41af3..d795abb3a 100644 --- a/lib/pavilion/commands/result.py +++ b/lib/pavilion/commands/result.py @@ -9,6 +9,7 @@ import shutil from math import log10, floor import re +from argparse import Namespace from typing import List, IO, Union, Optional, Any from pavilion.errors import TestConfigError, ResultError @@ -21,6 +22,7 @@ from pavilion import result from pavilion import result_utils from pavilion import utils +from pavilion.config import PavConfig from pavilion.test_ids import resolve_mixed_ids from pavilion.status_file import STATES from pavilion.test_run import TestRun @@ -151,12 +153,21 @@ def _setup_arguments(self, parser): ) filters.add_test_filter_args(parser) - def run(self, pav_cfg, args): + def run(self, pav_cfg: PavConfig, args: Namespace) -> int: """Print the test results in a variety of formats.""" - args.tests = resolve_mixed_ids(args.tests) - test_paths = cmd_utils.arg_filtered_tests(pav_cfg, args, - verbose=self.errfile).paths + ids = resolve_mixed_ids(args.tests) + tests = ids["tests"] + series = ids["series"] + + test_paths = cmd_utils.arg_filtered_tests( + pav_cfg, + tests, + series, + filter_query=args.filter, + sort_by=args.sort_by, + limit=args.limit, + verbose=self.errfile).paths tests = cmd_utils.get_tests_by_paths(pav_cfg, test_paths, self.errfile) log_file = None @@ -307,7 +318,7 @@ def run(self, pav_cfg, args): self.errfile, "One or more of the requested tests never completed, and therefore have no " "results to 're-run'. 
Check the status and/or logs for these tests to see why:\n" - + ", ".join([test.full_id for test in skipped_reruns]), + + ", ".join([str(test.id) for test in skipped_reruns]), color=output.YELLOW) if args.all_passed and not all_passed: diff --git a/lib/pavilion/commands/run.py b/lib/pavilion/commands/run.py index 6c087d3c2..a1fee2f1f 100644 --- a/lib/pavilion/commands/run.py +++ b/lib/pavilion/commands/run.py @@ -15,6 +15,7 @@ from pavilion.series.series import TestSeries from pavilion.series_config import generate_series_config from pavilion.status_utils import print_from_tests +from pavilion.test_ids import GroupID from .base_classes import Command @@ -92,7 +93,7 @@ def _generic_arguments(parser): '\'key=value\', where key is the dot separated key name, ' 'and value is a json object. Example: `-c schedule.nodes=23`') parser.add_argument( - '-g', '--group', action="store", type=str, + '-g', '--group', action="store", type=GroupID, help="Add the created test series to the given group, creating it if necessary.") parser.add_argument( '-v', '--verbosity', choices=[verb.name for verb in Verbose], diff --git a/lib/pavilion/commands/series.py b/lib/pavilion/commands/series.py index ef996f8f8..fe58b9fdb 100644 --- a/lib/pavilion/commands/series.py +++ b/lib/pavilion/commands/series.py @@ -58,7 +58,8 @@ def _setup_arguments(self, parser): 'cancel', help="Cancel a series or series. Defaults to the your last series on this system.") filters.add_series_filter_args(cancel_p, sort_keys=[], disable_opts=['sys-name']) - cancel_p.add_argument('series', nargs='*', help="One or more series to cancel") + cancel_p.add_argument('series', type=SeriesID, nargs='*', + help="One or more series to cancel") list_p = subparsers.add_parser( 'list', @@ -84,7 +85,7 @@ def _setup_arguments(self, parser): formatter_class=arguments.WrappedFormatter) list_p.add_argument( - 'series', nargs='*', + 'series', nargs='*', type=SeriesID, default=[SeriesID("all")], help="Specific series to show. Defaults to all your recent series on this cluster.", ) filters.add_series_filter_args(list_p) @@ -138,7 +139,7 @@ def _setup_arguments(self, parser): "`pav series status`") set_status_p.add_argument('--merge-repeats', '-m', default=False, action='store_true', help='Merge data from all repeats of each set.') - set_status_p.add_argument('series', default='last', nargs='?', + set_status_p.add_argument('series', type=SeriesID, default=SeriesID("last"), nargs='?', help='The series to print the sets for.') state_p = subparsers.add_parser( @@ -154,7 +155,7 @@ def _setup_arguments(self, parser): state_p_filter_args.add_argument( '--skipped', action='store_true', default=False, help="List only skipped test reasons.") - state_p.add_argument('series', default='last', nargs='?', + state_p.add_argument('series', type=SeriesID, default=SeriesID("last"), nargs='?', help="The series to print status history for.") def _find_series(self, pav_cfg, series_name): @@ -217,7 +218,7 @@ def _run_cmd(self, pav_cfg, args): group.add([series_obj]) except groups.TestGroupError as err: output.fprint(self.errfile, "Error adding series '{}' to group '{}'." 
- .format(series_obj.sid, group.name), color=output.RED) + .format(series_obj.id, group.name), color=output.RED) output.fprint(self.errfile, err.pformat()) return errno.EINVAL output.fprint(self.errfile, @@ -243,19 +244,22 @@ def _run_cmd(self, pav_cfg, args): "Run `pav series status {sid}` to view series status.\n" "Run `pav series cancel {sid}` to cancel the series (and all its tests).\n" "Run `pav series sets {sid}` to view status of individual test sets." - .format(sid=series_obj.sid)) + .format(sid=series_obj.id)) self.last_run_series = series_obj return 0 @sub_cmd(*LIST_ALIASES) - def _list_cmd(self, pav_cfg, args): + def _list_cmd(self, pav_cfg: PavConfig, args: Namespace) -> int: """List series.""" - args.series = resolve_mixed_ids(args.series, auto_last=True) matched_series = cmd_utils.arg_filtered_series( - pav_cfg=pav_cfg, args=args, verbose=self.errfile) + pav_cfg, + args.series, + filter_query=args.filter, + limit=args.limit, + verbose=self.errfile) rows = [ser.attr_dict() for ser in matched_series] @@ -388,7 +392,7 @@ def _merge_sets(self, set1: dict, set2: dict, keys: List[str]) -> dict: def _state_history_cmd(self, pav_cfg: config.PavConfig, args): """Print the full status history for a series.""" - if args.series == 'last': + if args.series == SeriesID("last"): ser = cmd_utils.load_last_series(pav_cfg, self.errfile) if ser is None: return errno.EINVAL @@ -433,25 +437,29 @@ def _state_history_cmd(self, pav_cfg: config.PavConfig, args): def _cancel_cmd(self, pav_cfg: PavConfig, args: Namespace) -> int: """Cancel all series found given the arguments.""" - args.series = resolve_mixed_ids(args.series, auto_last=True) - series_info = cmd_utils.arg_filtered_series(pav_cfg, args, verbose=self.errfile) + series_info = cmd_utils.arg_filtered_series( + pav_cfg, + args.series, + filter_query=args.filter, + limit=args.limit, + verbose=self.errfile) output.fprint(self.outfile, "Found {} series to cancel.".format(len(series_info))) chosen_series = [] for ser in series_info: try: - loaded_ser = series.TestSeries.load(pav_cfg, ser.sid) + loaded_ser = series.TestSeries.load(pav_cfg, ser.id) chosen_series.append(loaded_ser) except series.TestSeriesError as err: output.fprint(self.errfile, "Could not load found series '{}': {}" - .format(ser.sid, err.args[0])) + .format(ser.id, err.args[0])) tests_to_cancel = [] for ser in chosen_series: # We'll cancel the tests verbosely. ser.cancel(message="By user {}".format(utils.get_login()), cancel_tests=False) - output.fprint(self.outfile, "Series {} cancelled.".format(ser.sid)) + output.fprint(self.outfile, "Series {} cancelled.".format(ser.id)) tests_to_cancel.extend(ser.tests.values()) diff --git a/lib/pavilion/commands/set_status.py b/lib/pavilion/commands/set_status.py index 6891310ef..624438990 100644 --- a/lib/pavilion/commands/set_status.py +++ b/lib/pavilion/commands/set_status.py @@ -32,8 +32,8 @@ def _setup_arguments(self, parser): help='Note to set for the test, tests, or suite of tests.' ) parser.add_argument( - 'test', action='store', metavar='', - help='The name of the test to set the status of. If no value is ' + 'test', action='store', type=TestID, metavar='', + help='The ID of the test to set the status of. If no value is ' 'provided, the most recent suite submitted by this user is ' 'used.' ) @@ -43,16 +43,10 @@ def run(self, pav_cfg, args): # Zero is given as the default when running test scripts outside of # Pavilion. 
- if args.test == 0: + if args.test.id == 0: return 0 - if TestID.is_valid_id(args.test): - test_id = TestID(args.test) - else: - output.fprint(self.errfile, f"{args.test} is not a valid test ID.") - return errno.EEXIST - - tests = cmd_utils.get_tests_by_id(pav_cfg, [test_id], self.errfile) + tests = cmd_utils.get_tests_by_id(pav_cfg, [args.test], self.errfile) if not tests: output.fprint(self.errfile, "Test {} could not be opened.".format(args.test), diff --git a/lib/pavilion/commands/status.py b/lib/pavilion/commands/status.py index da5fcd4f1..a32c8c847 100644 --- a/lib/pavilion/commands/status.py +++ b/lib/pavilion/commands/status.py @@ -2,11 +2,13 @@ other commands to print statuses.""" import errno +from argparse import Namespace from pavilion import cmd_utils from pavilion import filters from pavilion import output from pavilion import status_utils +from pavilion.config import PavConfig from pavilion.test_ids import resolve_mixed_ids, SeriesID from pavilion.errors import PavilionError from .base_classes import Command @@ -50,14 +52,25 @@ def _setup_arguments(self, parser): filters.add_test_filter_args(parser) - def run(self, pav_cfg, args): + def run(self, pav_cfg: PavConfig, args: Namespace) -> int: """Gathers and prints the statuses from the specified test runs and/or series.""" - args.tests = resolve_mixed_ids(args.tests) + show_series = args.series + + ids = resolve_mixed_ids(args.tests) + tests = ids["tests"] + series = ids["series"] try: - test_paths = cmd_utils.arg_filtered_tests(pav_cfg, args, verbose=self.errfile).paths + test_paths = cmd_utils.arg_filtered_tests( + pav_cfg, + tests, + series, + filter_query=args.filter, + sort_by=args.sort_by, + limit=args.limit, + verbose=self.errfile).paths except (ValueError, PavilionError) as err: output.fprint(self.errfile, err, color=output.RED) return errno.EINVAL @@ -78,7 +91,7 @@ def run(self, pav_cfg, args): return self.print_summary(statuses) else: return status_utils.print_status(statuses, self.outfile, json=args.json, - series=args.series, note=args.note, + series=show_series, note=args.note, sorter=args.sort_by) def print_summary(self, statuses): diff --git a/lib/pavilion/commands/wait.py b/lib/pavilion/commands/wait.py index f0edd9146..decbd2201 100644 --- a/lib/pavilion/commands/wait.py +++ b/lib/pavilion/commands/wait.py @@ -51,7 +51,7 @@ def _setup_arguments(self, parser): ) parser.add_argument( 'tests', nargs='*', action='store', - help='The name(s) of the tests to check. These may be any mix of ' + help='The ID(s) of the tests to check. These may be any mix of ' 'test IDs and series IDs. If no value is provided, the most ' 'recent series submitted by this user is checked.' 
) @@ -73,7 +73,11 @@ def run(self, pav_cfg, args): # get start time start_time = time.time() - args.tests = resolve_mixed_ids(args.tests, auto_last=True) + + ids = resolve_mixed_ids(args.tests, auto_last=True) + args.tests = ids["tests"] + args.series = ids["series"] + tests = cmd_utils.get_tests_by_id(pav_cfg, args.tests, self.errfile) # determine timeout time, if there is one @@ -93,7 +97,6 @@ def wait(self, pav_cfg, tests: List[TestRun], done_tests = [] all_tests = list(tests) - all_tests.sort(key=lambda t: t.full_id) tests = list(tests) diff --git a/lib/pavilion/dir_db.py b/lib/pavilion/dir_db.py index 8e9f8de14..b600e2995 100644 --- a/lib/pavilion/dir_db.py +++ b/lib/pavilion/dir_db.py @@ -12,9 +12,10 @@ from concurrent.futures import ThreadPoolExecutor from functools import partial from pathlib import Path -from typing import Callable, List, Iterable, Any, Dict, NewType, \ - Union, NamedTuple, IO, Tuple +from typing import Callable, List, Iterable, Any, Dict, NewType, Optional, \ + Union, NamedTuple, IO, Tuple, TypeVar +from pavilion.config import PavConfig from pavilion import lockfile from pavilion import output @@ -27,7 +28,12 @@ LOGGER = logging.getLogger(__file__) -def make_id_path(base_path, id_) -> Path: +SelectItems = NamedTuple("SelectItems", [('data', List[Dict[str, Any]]), + ('paths', List[Path])]) +T = TypeVar("T") + + +def make_id_path(base_path: Path, id_: Union[str, int]) -> Path: """Create the full path to an id directory given its base path and the id. @@ -36,7 +42,7 @@ def make_id_path(base_path, id_) -> Path: :rtype: Path """ - return base_path / (ID_FMT.format(id=id_)) + return base_path / str(id_) def reset_pkey(id_dir: Path) -> None: @@ -53,21 +59,26 @@ def reset_pkey(id_dir: Path) -> None: pass -def create_id_dir(id_dir: Path) -> (int, Path): +def create_id_dir(id_dir: Path, link_target: Optional[Path] = None, + next_fn: Optional[Path] = None) -> Tuple[int, Path]: """In the given directory, create the lowest numbered (positive integer) - directory that doesn't already exist. + directory that doesn't already exist. If link_target is given, create a + symlink to that target instead of a directory. :param id_dir: Path to the directory that contains these 'id' directories + :param link_target: Create the ID path as a symlink to the given target, rather than as + a directory. + :param: next_fn: File from which to read the next ID. :returns: The id and path to the created directory. :raises OSError: on directory creation failure. :raises TimeoutError: If we couldn't get the lock in time. 
""" lockfile_path = id_dir/'.lockfile' - with lockfile.LockFile(lockfile_path, timeout=1): - next_fn = id_dir/PKEY_FN + next_fn = next_fn or id_dir/PKEY_FN + with lockfile.LockFile(lockfile_path, timeout=1): next_valid = True if next_fn.exists(): @@ -103,7 +114,10 @@ def create_id_dir(id_dir: Path) -> (int, Path): next_id_path = make_id_path(id_dir, next_id) - next_id_path.mkdir() + if link_target is None: + next_id_path.mkdir() + else: + next_id_path.symlink_to(link_target) with next_fn.open('w') as next_file: next_file.write(str(next_id + 1)) @@ -116,21 +130,22 @@ def default_filter(_: Path) -> bool: return True -Index = NewType("Index", Dict[int, Dict['str', Any]]) +Index = NewType("Index", Dict[int, Dict[str, Any]]) -def identity(value): +def identity(value: T) -> T: """Because lambdas can't be pickled.""" return value -def index(pav_cfg, - id_dir: Path, idx_name: str, +def index(pav_cfg: PavConfig, + id_dir: Path, + idx_name: str, transform: Callable[[Path], Dict[str, Any]], complete_key: str = 'complete', refresh_period: int = 1, - verbose: IO[str] = None, - fn_base: int = 10) -> Index: + verbose: Optional[IO[str]] = None, + fn_base: int = 16) -> Index: """Load and/or update an index of the given directory for the given transform, and return it. The returned index is a dictionary by id of the transformed data. @@ -254,11 +269,11 @@ def do_transform(pair): return idx -SelectItems = NamedTuple("SelectItems", [('data', List[Dict[str, Any]]), - ('paths', List[Path])]) - - -def select_one(path, ffunc, trans, ofunc, fnb): +def select_one(path: Path, + ffunc: Optional[Callable[[Path], bool]], + trans: Optional[Callable[[Path], T]], + ofunc: Callable[[T], Any], + fnb: int) -> Optional[T]: """Allows the objects to be filtered and transformed in parallel with map. :param path: Path to filter and transform (input to reduced function) @@ -294,17 +309,17 @@ def select_one(path, ffunc, trans, ofunc, fnb): return item -def select(pav_cfg, +def select(pav_cfg: PavConfig, id_dir: Path, filter_func: Callable[[Any], bool] = default_filter, - transform: Callable[[Path], Any] = None, - order_func: Callable[[Any], Any] = None, + transform: Optional[Callable[[Path], Any]] = None, + order_func: Optional[Callable[[Dict[str, Any]], Any]] = None, order_asc: bool = True, - fn_base: int = 10, + fn_base: int = 16, idx_complete_key: 'str' = 'complete', use_index: Union[bool, str] = True, verbose: IO[str] = None, - limit: int = None) -> (List[Any], List[Path]): + limit: int = None) -> SelectItems: """Filter and order found paths in the id directory based on the filter and other parameters. If a transform is given, this will create an index of the data returned by the transform to hasten this process. @@ -378,15 +393,14 @@ def select(pav_cfg, fn_base=fn_base, limit=limit) - -def select_from(pav_cfg, +def select_from(pav_cfg: PavConfig, paths: Iterable[Path], - filter_func: Callable[[Any], bool] = default_filter, - transform: Callable[[Path], Any] = None, - order_func: Callable[[Any], Any] = None, + filter_func: Callable[[T], bool] = default_filter, + transform: Optional[Callable[[Path], T]] = None, + order_func: Optional[Callable[[T], Any]] = None, order_asc: bool = True, - fn_base: int = 10, - limit: int = None) -> (List[Any], List[Path]): + fn_base: int = 16, + limit: int = None) -> SelectItems: """Filter, order, and truncate the given paths based on the filter and other parameters. 
diff --git a/lib/pavilion/filters/filters.py b/lib/pavilion/filters/filters.py index 81c1c32b8..21b0f88ce 100644 --- a/lib/pavilion/filters/filters.py +++ b/lib/pavilion/filters/filters.py @@ -5,7 +5,7 @@ from functools import partial from pathlib import Path from enum import Enum, auto -from typing import Dict, Any, Callable, List, Mapping +from typing import Dict, Any, Callable, List, Mapping, TypeVar, Tuple from lark import Lark from lark.exceptions import UnexpectedInput @@ -77,10 +77,13 @@ " user=USER Include only tests/series started by this user. \n") +T = TypeVar("T") + + filter_parser = Lark.open(GRAMMAR_PATH, start="expr") -def sort_func(test_attrs: Mapping, key: str) -> Any: +def sort_func(test_attrs: Mapping[str, T], key: str) -> T: """Use partial to reduce inputs and use as key in sort function. Sort by default key if given key is invalid at this stage. @@ -224,7 +227,7 @@ def add_series_filter_args(arg_parser: argparse.ArgumentParser, def get_sort_opts( sort_name: str, - stype: str) -> (Callable[[Path], Any], bool): + stype: str) -> Tuple[Callable[[Mapping[str, T]], T], bool]: """Return a sort function and sort order. :param sort_name: The name of the sort, possibly prepended with -. diff --git a/lib/pavilion/groups.py b/lib/pavilion/groups.py index f44faa1aa..911d32d64 100644 --- a/lib/pavilion/groups.py +++ b/lib/pavilion/groups.py @@ -1,19 +1,23 @@ """Groups are a named collection of series and tests. They can be manipulated with the `pav group` command.""" +# pylint: disable=invalid-name + from pathlib import Path import re import shutil -from typing import NewType, List, Tuple, Union, Dict +from typing import NewType, List, Tuple, Union, Dict, Any import uuid from pavilion import config from pavilion.errors import TestGroupError from pavilion.series import TestSeries, list_series_tests, SeriesInfo from pavilion.test_run import TestRun, TestAttributes +from pavilion.test_ids import ID, TestID, SeriesID, GroupID from pavilion.utils import is_int -GroupMemberDescr = NewType('GroupMemberDescr', Union[TestRun, TestSeries, "TestGroup", str]) +GroupMemberDescr = NewType('GroupMemberDescr', Union[TestRun, TestSeries, "TestGroup", + TestID, SeriesID, GroupID]) FlexDescr = NewType('FlexDescr', Union[List[GroupMemberDescr], GroupMemberDescr]) @@ -25,22 +29,13 @@ class TestGroup: SERIES_DIR = 'series' EXCLUDED_DIR = 'excluded' - TEST_ITYPE = 'test' - SERIES_ITYPE = 'series' - GROUP_ITYPE = 'group' - EXCL_ITYPE = 'test*' - - group_name_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9_-]+$') - - def __init__(self, pav_cfg: config.PavConfig, name: str): + def __init__(self, pav_cfg: config.PavConfig, name: GroupID): self.pav_cfg = pav_cfg - self._check_name(name) - self.name = name - self.path = self.pav_cfg.working_dir/self.GROUPS_DIR/self.name + self.path = self.pav_cfg.working_dir/self.GROUPS_DIR/str(self.name) if self.path.exists(): self.created = True @@ -72,17 +67,17 @@ def exists(self) -> bool: return self.path.exists() - def info(self) -> Dict: + def info(self) -> Dict[str, Any]: """Return some basic group info. 
Number of tests, series, sub-groups, creation time.""" info = { 'name': self.name, 'created': self.path.stat().st_mtime, } - for cat_type, cat_dir in ( - (self.TEST_ITYPE, self.TESTS_DIR), - (self.SERIES_ITYPE, self.SERIES_DIR), - (self.GROUP_ITYPE, self.GROUPS_DIR),): + for _, cat_dir in ( + (TestRun, self.TESTS_DIR), + (TestSeries, self.SERIES_DIR), + (self.__class__, self.GROUPS_DIR),): cat_path = self.path/cat_dir if not cat_path.exists(): @@ -92,7 +87,7 @@ def info(self) -> Dict: return info - def tests(self, seen_groups=None) -> List[Path]: + def tests(self, seen_groups: List[GroupID] = None) -> List[Path]: """Returns a list of paths to all tests in this group. Use with cmd_utils.get_tests_by_paths to convert to real test objects. Bad links are ignored. Groups are recursively examined (loops are allowed, but not followed). @@ -138,7 +133,8 @@ def tests(self, seen_groups=None) -> List[Path]: .format(series_dir.name, self.name), prior_error=err) - tests.extend(list_series_tests(self.pav_cfg, series_dir.name)) + sid = SeriesID(f"s{series_dir.name}") + tests.extend(list_series_tests(self.pav_cfg, sid)) except OSError as err: raise TestGroupError( @@ -149,8 +145,9 @@ def tests(self, seen_groups=None) -> List[Path]: try: if (self.path/self.GROUPS_DIR).exists(): for group_file in (self.path/self.GROUPS_DIR).iterdir(): - group_name = group_file.name + group_name = GroupID(group_file.name) sub_group = TestGroup(self.pav_cfg, group_name) + if group_name not in seen_groups: tests.extend(sub_group.tests(seen_groups=seen_groups)) @@ -169,7 +166,7 @@ def _has_test(self, test_path: Path) -> bool: return test_path in self.tests() - def add(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: + def add(self, items: FlexDescr) -> Tuple[List[ID], List[TestGroupError]]: """Add each of the given items to the group. Accepts TestRun, TestSeries, and TestGroup objects, as well as just the test/series(sid)/group names as strings. @@ -200,11 +197,11 @@ def add(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: # Get a string a name for the item, and the path to the actual item. try: - if itype == self.TEST_ITYPE: + if itype == TestRun: iname, dest_path = self._get_test_info(item) - elif itype == self.SERIES_ITYPE: + elif itype == TestSeries: iname, dest_path = self._get_series_info(item) - elif itype == self.GROUP_ITYPE: + elif itype == TestGroup: if isinstance(item, TestGroup): agroup = item else: @@ -227,7 +224,7 @@ def add(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: prior_error=err)) continue - if itype == self.TEST_ITYPE: + if itype == TestRun: if iname in self._excluded(): try: self._remove_excluded(iname) @@ -238,18 +235,18 @@ def add(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: prior_error=err)) if self._has_test(dest_path): - added.append((self.EXCL_ITYPE, iname)) + added.append(iname) continue try: # For tests and series, symlink to their directories. - if itype in (self.TEST_ITYPE, self.SERIES_ITYPE): + if itype in (TestRun, TestSeries): # Add the item, unless it just needed to be un-excluded. item_path.symlink_to(dest_path) # For groups, just touch a file of that name (prevents symlink loops). 
else: item_path.touch() - added.append((itype, iname)) + added.append(iname) except OSError as err: warnings.append( TestGroupError("Could not add {} '{}' to test group '{}'" @@ -258,7 +255,7 @@ def add(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: return added, warnings - def remove(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: + def remove(self, items: FlexDescr) -> Tuple[List[ID], List[TestGroupError]]: """Remove all of the given items from the group. Returns a list of warnings.""" removed = [] @@ -270,15 +267,13 @@ def remove(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: all_tests = None for item in items: - if isinstance(item, int): - item = str(item) itype, rmpath = self._get_member_info(item) if not rmpath.exists(): - if itype == self.TEST_ITYPE: + if itype == TestRun: try: - t_full_id, t_path = self._get_test_info(rmpath.name) + t_full_id, t_path = self._get_test_info(TestID(rmpath.name)) except TestGroupError as err: warnings.append( TestGroupError( @@ -292,7 +287,7 @@ def remove(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: if t_path in all_tests: self._add_excluded(t_full_id, t_path) - removed.append((self.EXCL_ITYPE, t_full_id)) + removed.append(t_full_id) continue warnings.append( @@ -302,7 +297,13 @@ def remove(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: try: rmpath.unlink() - removed.append((itype, rmpath.name)) + + if itype == TestRun: + removed.append(TestID(rmpath.name)) + elif itype == TestSeries: + removed.append(SeriesID(f"s{rmpath.name}")) + else: + removed.append(GroupID(rmpath.name)) except OSError: warnings.append( TestGroupError("Could not remove {} '{}' from group '{}'." @@ -311,7 +312,7 @@ def remove(self, items: FlexDescr) -> Tuple[List[str], List[TestGroupError]]: return removed, warnings - def members(self, recursive=False, seen_groups=None) -> List[Dict]: + def members(self, recursive: bool = False, seen_groups: List[GroupID] = None) -> List[Dict]: """Return a list of dicts of member info, keys 'itype', 'name'.""" seen_groups = seen_groups if seen_groups is not None else [] @@ -322,10 +323,10 @@ def members(self, recursive=False, seen_groups=None) -> List[Dict]: members = [] - for itype, type_dir in ( - (self.TEST_ITYPE, self.TESTS_DIR), - (self.SERIES_ITYPE, self.SERIES_DIR), - (self.GROUP_ITYPE, self.GROUPS_DIR)): + for itype, type_dir, id_type in ( + (TestRun, self.TESTS_DIR, TestID), + (TestSeries, self.SERIES_DIR, lambda x: SeriesID(f"s{x}")), + (TestGroup, self.GROUPS_DIR, GroupID)): try: for path in (self.path/type_dir).iterdir(): @@ -340,15 +341,15 @@ def members(self, recursive=False, seen_groups=None) -> List[Dict]: 'group': self.name, 'itype': itype, 'path': abs_path, - 'id': path.name,}) + 'id': id_type(path.name),}) - if recursive and itype == self.GROUP_ITYPE and path.name not in seen_groups: - try: - subgroup = self.__class__(self.pav_cfg, path.name) - except TestGroupError: - continue + if recursive and itype == TestGroup and GroupID(path.name) not in seen_groups: + try: + subgroup = self.__class__(self.pav_cfg, path.name) + except TestGroupError: + continue - members.extend(subgroup.members(recursive=True, seen_groups=seen_groups)) + members.extend(subgroup.members(recursive=True, seen_groups=seen_groups)) except OSError as err: raise TestGroupError( @@ -360,11 +361,11 @@ def members(self, recursive=False, seen_groups=None) -> List[Dict]: if path is None: continue - if mem_info['itype'] == self.TEST_ITYPE: + if mem_info['itype'] == 
TestRun: test_attrs = TestAttributes(mem_info['path']) mem_info['name'] = test_attrs.name mem_info['created'] = test_attrs.created - elif mem_info['itype'] == self.SERIES_ITYPE: + elif mem_info['itype'] == TestSeries: series_info = SeriesInfo(self.pav_cfg, path) mem_info['name'] = series_info.name mem_info['created'] = series_info.created @@ -374,14 +375,6 @@ def members(self, recursive=False, seen_groups=None) -> List[Dict]: mem_info['created'] = path.stat().st_mtime return members - def member_tuples(self) -> List[Tuple[str,str]]: - """As per 'members', but return a list of (item_type, item_id) tuples.""" - - tups = [] - for item in self.members(): - tups.append((item['itype'], item['id'])) - return tups - def clean(self) -> List[TestGroupError]: """Remove all dead links and group files, then delete the group if it's empty. Returns a list of errors/warnings.""" @@ -391,14 +384,14 @@ def clean(self) -> List[TestGroupError]: # Cleanup items for each item type (tests, series, groups) for itype, type_dir in ( - (self.TEST_ITYPE, self.TESTS_DIR), - (self.SERIES_ITYPE, self.SERIES_DIR), - (self.GROUP_ITYPE, self.GROUPS_DIR)): + (TestRun, self.TESTS_DIR), + (TestSeries, self.SERIES_DIR), + (TestGroup, self.GROUPS_DIR)): try: for item_path in (self.path/type_dir).iterdir(): # Skip that items that still exist. - if itype == self.GROUP_ITYPE: + if itype == TestGroup: if (self.path.parent/item_path.name).exists(): keepers = True continue @@ -445,16 +438,14 @@ def delete(self): self.created = False - def rename(self, new_name, redirect_parents=True): + def rename(self, new_name: GroupID, redirect_parents: bool = True) -> None: """Rename this group. :param redirect_parents: Search other test groups for inclusion of this group, and point them at the new name. """ - self._check_name(new_name) - - new_path = self.path.parent/new_name + new_path = self.path.parent/str(new_name) if new_path.exists(): raise TestGroupError("Renaming group '{}' to '{}' but a group already exists " @@ -471,8 +462,8 @@ def rename(self, new_name, redirect_parents=True): try: for group_path in self.path.parent.iterdir(): for sub_group in (group_path/self.GROUPS_DIR).iterdir(): - if sub_group.name == self.name: - new_sub_path = sub_group.parent/new_name + if sub_group.name == str(self.name): + new_sub_path = sub_group.parent/str(new_name) sub_group.rename(new_sub_path) except OSError as err: raise TestGroupError("Failed to redirect parents of group '{}' to the new name." @@ -481,57 +472,15 @@ def rename(self, new_name, redirect_parents=True): self.name = new_name self.path = new_path - def _check_name(self, name: str): - """Make sure the given test group name complies with the naming standard.""" - - if self.group_name_re.match(name) is None: - raise TestGroupError( - "Invalid group name '{}'\n" - "Group names must start with a letter, but can otherwise have any " - "combination of letters, numbers, underscores and dashes." - .format(name)) - if name[0] in ('s', 'S') and is_int(name[1:]): - raise TestGroupError( - "Invalid group name '{}'\n" - "Group name looks too much like a series ID." - .format(name)) - - def _get_test_info(self, test: Union[TestRun, str]) -> Tuple[str, Path]: + def _get_test_info(self, test: Union[TestRun, TestID]) -> Tuple[TestID, Path]: """Find the test full id and path from the given test information.""" if isinstance(test, TestRun): if not test.path.exists(): - raise TestGroupError("Test '{}' does not exist.".format(test.full_id)) - return test.full_id, test.path - - if isinstance(test, str): - if '.' 
in test: - cfg_label, test_id = test.split('.', maxsplit=1) - else: - cfg_label = config.DEFAULT_CONFIG_LABEL - test_id = test - - elif isinstance(test, int): - cfg_label = config.DEFAULT_CONFIG_LABEL - test_id = str(int) - # We'll use this as our full_id too. - test = test_id - - if not is_int(test_id): - raise TestGroupError( - "Invalid test id '{}' from test id '{}'.\n" - "Test id's must be a number, like 27." - .format(test_id, test)) - if cfg_label not in self.pav_cfg.configs: - raise TestGroupError( - "Invalid config label '{}' from test id '{}'.\n" - "No such Pavilion configuration directory exists. Valid config " - "labels are:\n {}" - .format(cfg_label, test, - '\n'.join([' - {}'.format(lbl for lbl in self.pav_cfg.configs)]))) + raise TestGroupError("Test '{}' does not exist.".format(test.id)) + return test.id, test.path - rel_cfg = self.pav_cfg.configs[cfg_label] - tpath = rel_cfg.working_dir/'test_runs'/test_id + tpath = self.pav_cfg.working_dir/'test_runs'/str(test) if not tpath.is_dir(): raise TestGroupError( @@ -540,27 +489,16 @@ def _get_test_info(self, test: Union[TestRun, str]) -> Tuple[str, Path]: return test, tpath - def _get_series_info(self, series: Union[TestSeries, str]) -> Tuple[str, Path]: + def _get_series_info(self, series: Union[TestSeries, SeriesID]) -> Tuple[SeriesID, Path]: """Get the sid and path for a series, given a flexible description.""" if isinstance(series, TestSeries): if not series.path.exists(): raise TestGroupError("Series '{}' at '{}' does not exist." - .format(series.sid, series.path)) - return series.sid, series.path - - series = str(series) - if series.startswith("s"): - series_id = series[1:] - sid = series - else: - sid = 's{}'.format(series) + .format(series.id, series.path)) + return series.id, series.path - if not is_int(series_id): - raise TestGroupError("Invalid series id '{}', not numeric id." - .format(series)) - - series_dir = self.pav_cfg.working_dir/'series'/series_id + series_dir = self.pav_cfg.working_dir/'series'/str(series.as_int()) if not series_dir.is_dir(): raise TestGroupError("Series directory for sid '{}' does not exist.\n" @@ -568,26 +506,22 @@ def _get_series_info(self, series: Union[TestSeries, str]) -> Tuple[str, Path]: return sid, series_dir - def _get_member_info(self, item: GroupMemberDescr) -> Tuple[str, Path]: + def _get_member_info(self, item: GroupMemberDescr) -> Tuple[type, Path]: """Figure out what type of item 'item' is, and return its type name and path in the group.""" if isinstance(item, TestRun): - return self.TEST_ITYPE, self.path/self.TESTS_DIR/item.full_id + return TestRun, self.path/self.TESTS_DIR/str(item.id) + elif isinstance(item, TestID): + return TestRun, self.path/self.TESTS_DIR/str(item) elif isinstance(item, TestSeries): - return self.SERIES_ITYPE, self.path/self.SERIES_DIR/item.sid + return TestSeries, self.path/self.SERIES_DIR/str(item.id.as_int()) + elif isinstance(item, SeriesID): + return TestSeries, self.path/self.SERIES_DIR/str(item.as_int()) elif isinstance(item, self.__class__): - return self.GROUP_ITYPE, self.path/self.GROUPS_DIR/item.name - elif isinstance(item, str): - if is_int(item) or '.' 
in item: - # Looks like a test id - return self.TEST_ITYPE, self.path/self.TESTS_DIR/item - elif item[0] == 's' and is_int(item[1:]): - # Looks like a sid - return self.SERIES_ITYPE, self.path/self.SERIES_DIR/item - else: - # Anything can only be a group - return self.GROUP_ITYPE, self.path/self.GROUPS_DIR/item + return self.__class__, self.path/self.GROUPS_DIR/str(item.name) + elif isinstance(item, GroupID): + return self.__class__, self.path/self.GROUPS_DIR/str(item) else: raise TestGroupError("Invalid group item '{}' given for removal.".format(item)) @@ -601,19 +535,19 @@ def _excluded(self) -> Dict[str, Path]: excluded = {} try: for test_path in (self.path/self.EXCLUDED_DIR).iterdir(): - full_id = test_path.name + id = TestID(test_path.name) test_path = test_path.resolve() if test_path.exists(): - excluded[full_id] = test_path + excluded[id] = test_path except (OSError, FileNotFoundError): pass return excluded - def _add_excluded(self, full_id: str, test_path: Path): + def _add_excluded(self, id: TestID, test_path: Path): """Add the given test path to the excluded directory.""" - path = self.path/self.EXCLUDED_DIR/full_id + path = self.path/self.EXCLUDED_DIR/str(id) try: if not path.exists(): @@ -623,10 +557,10 @@ def _add_excluded(self, full_id: str, test_path: Path): "Could not create test exclusion record at {}".format(path), prior_error=err) - def _remove_excluded(self, full_id: str): + def _remove_excluded(self, id: TestID): """Remove the test from the exclusion records.""" - path = self.path/self.EXCLUDED_DIR/full_id + path = self.path/self.EXCLUDED_DIR/str(id) try: if path.exists(): @@ -641,10 +575,19 @@ def _clean_excluded(self): root_path = self.path/self.EXCLUDED_DIR - for full_id, path in self._excluded().items(): + for id, path in self._excluded().items(): if not path.exists(): - ex_path = root_path/full_id + ex_path = root_path/id try: ex_path.unlink() except (OSError, FileNotFoundError) as err: pass + + def __contains__(self, item: ID) -> bool: + if isinstance(item, TestID): + return str(item) in map(lambda x: x.name, (self.path / self.TESTS_DIR).iterdir()) + elif isinstance(item, SeriesID): + return str(item.as_int()) in map(lambda x: x.name, + (self.path / self.SERIES_DIR).iterdir()) + else: + return str(item) in map(lambda x: x.name, (self.path / self.GROUPS_DIR).iterdir()) diff --git a/lib/pavilion/id_utils.py b/lib/pavilion/id_utils.py new file mode 100644 index 000000000..e1b8d1194 --- /dev/null +++ b/lib/pavilion/id_utils.py @@ -0,0 +1,73 @@ +"""Utilities for working with test, series, and group IDs. + +I'm putting this in its own file because it causes a circular import pretty much anywhere +else I try to put it. 
– HW""" + +import json +from pathlib import Path +from typing import TextIO, Optional + +from pavilion import utils +from pavilion import output +from pavilion.config import PavConfig +from pavilion.sys_vars import base_classes +from pavilion.test_ids import SeriesID, TestID, TestIDError + + +def load_user_series_id(pav_cfg: PavConfig, errfile: TextIO = None) -> Optional[SeriesID]: + """Load the last series id used by the current user.""" + + user = utils.get_login() + last_series_fn = pav_cfg.working_dir/'users' + last_series_fn /= '{}.json'.format(user) + + sys_vars = base_classes.get_vars(True) + sys_name = sys_vars['sys_name'] + + if not last_series_fn.exists(): + return None + + try: + with last_series_fn.open() as last_series_file: + sys_name_series_dict = json.load(last_series_file) + return SeriesID(sys_name_series_dict[sys_name].strip()) + except (IOError, OSError, KeyError) as err: + if errfile: + output.fprint(errfile, "Failed to read series id file '{}'" + .format(last_series_fn), err) + return None + +def resolve_relative_id(pav_cfg: PavConfig, working_dir: Path, test_id: TestID) -> TestID: + """Resolve a series-relative ID into an absolute ID.""" + + if test_id.series is None: + sid = load_user_series_id(pav_cfg) + + if sid is None: + raise TestIDError(f"Unable to resolve test ID '{test_id}' with implicit series " + "'last'. No last series found.") + + test_id.series = sid + + series_dir = working_dir / "series" / str(test_id.series.as_int()) / "test_sets" + + if not series_dir.exists(): + raise TestIDError(f"Unable to resolve relative test ID '{test_id}' to absolute ID. " + f"No series '{test_id.series}' found.") + + # Search the series directory for the symlink matching the relative test ID, then resolve + # it to the absolute test ID. + for test_set in series_dir.iterdir(): + + if not test_set.is_dir(): + continue + + for test in test_set.iterdir(): + if test.name == str(test_id.series.as_int()): + test_id.id = test.resolve().name + test_id.series = None + + return test_id + + raise TestIDError(f"Unable to resolve relative test ID '{test_id}' to absolute ID. " + f"No test '{test_id.id}' found in series '{test_id.series}'.") diff --git a/lib/pavilion/jobs.py b/lib/pavilion/jobs.py index 87e2bc826..229e5b2f4 100644 --- a/lib/pavilion/jobs.py +++ b/lib/pavilion/jobs.py @@ -10,6 +10,7 @@ from pathlib import Path from typing import List, Union, NewType, Dict +from pavilion.test_ids import TestID from pavilion.types import ID_Pair, Nodes @@ -56,7 +57,7 @@ def new(cls, pav_cfg, tests: list, kickoff_fn: str = None): .format(test_link_dir), err) for test in tests: - (test_link_dir/test.full_id).symlink_to(test.path) + (test_link_dir/str(test.id)).symlink_to(test.path) job = cls(job_path) job.set_kickoff(kickoff_fn) @@ -133,6 +134,7 @@ def get_test_id_pairs(self) -> List[ID_Pair]: # create a circular import. pairs = [] + for test_dir in self.tests_path.iterdir(): if test_dir.is_symlink() and test_dir.exists(): try: @@ -143,7 +145,7 @@ def get_test_id_pairs(self) -> List[ID_Pair]: working_dir = test_dir.parents[1] try: - test_id = int(test_dir.name) + test_id = TestID(test_dir.name) except ValueError: # Skip any links that don't go to an id dir. 
continue diff --git a/lib/pavilion/output.py b/lib/pavilion/output.py index 4d24c0158..ce76f32f2 100644 --- a/lib/pavilion/output.py +++ b/lib/pavilion/output.py @@ -40,6 +40,7 @@ from typing import List, Dict, Union, TextIO, Any, Optional, Callable from pavilion import errors +from pavilion.test_ids import TestID, SeriesID BLACK = 30 RED = 31 @@ -678,7 +679,7 @@ def dt_format_rows(rows: List[Dict], fields: List[str], data = row.get(field, info.get('default', '')) orig_data = data # Transform the data, if a transform is given - if data != '' and data is not None: + if data is not None and not (isinstance(data, str) and data == ""): try: data = info.get('transform', lambda a: a)(data) except (ValueError, AttributeError, KeyError): @@ -952,6 +953,8 @@ def default(self, o): # pylint: disable=E0202 # Just auto-convert anything that looks like a dict. elif isinstance(o, (dict, UserDict)): return dict(o) + elif isinstance(o, (TestID, SeriesID)): + return str(o) # or has an 'as_dict' method elif hasattr(o, 'as_dict'): return o.as_dict() diff --git a/lib/pavilion/result/base.py b/lib/pavilion/result/base.py index 2738f686f..5c5e6d2bc 100644 --- a/lib/pavilion/result/base.py +++ b/lib/pavilion/result/base.py @@ -39,7 +39,7 @@ def get_top_keys(test, topkey: str) -> dict: BASE_RESULTS = { 'name': (lambda test: test.name, "The test run name"), - 'id': (lambda test: test.id, + 'id': (lambda test: str(test.id), "The test run id"), 'test_version': (lambda test: test.test_version, "The test config version."), diff --git a/lib/pavilion/result_utils.py b/lib/pavilion/result_utils.py index cacff59e2..fceb1d567 100644 --- a/lib/pavilion/result_utils.py +++ b/lib/pavilion/result_utils.py @@ -30,7 +30,7 @@ def get_result(test: TestRun): results['results_log'] = test.results_log.as_posix() except (TestRunError, TestRunNotFoundError) as err: - results = {'id': test.full_id} + results = {'id': test.id} for field in BASE_FIELDS[1:]: results[field] = None diff --git a/lib/pavilion/schedulers/advanced.py b/lib/pavilion/schedulers/advanced.py index 35f673b47..bf5541035 100644 --- a/lib/pavilion/schedulers/advanced.py +++ b/lib/pavilion/schedulers/advanced.py @@ -388,7 +388,7 @@ def schedule_tests(self, pav_cfg, tests: List[TestRun]) -> List[SchedulerPluginE node_list_id = int(test.var_man.get('sched.node_list_id')) sched_config = validate_config(test.config['schedule']) - sched_configs[test.full_id] = sched_config + sched_configs[test.id] = sched_config chunk_spec = test.config.get('chunk') if chunk_spec != 'any': # This is validated in test object creation. 
@@ -452,7 +452,7 @@ def _schedule_chunk(self, pav_cfg, chunk: NodeSet, tests: List[TestRun], errors = [] for test in tests: - sched_config = sched_configs[test.full_id] + sched_config = sched_configs[test.id] if not sched_config['share_allocation']: if sched_config['flex_scheduled']: @@ -471,14 +471,14 @@ def _schedule_chunk(self, pav_cfg, chunk: NodeSet, tests: List[TestRun], for job_share_key, tests in list(share_groups.items()): if len(tests) == 1: test = tests[0] - if sched_configs[test.full_id]['chunking']['size'] in (0, None): + if sched_configs[test.id]['chunking']['size'] in (0, None): flex_tests.append(test) else: indi_tests.append(test) del share_groups[job_share_key] for job_share_key, tests in share_groups.items(): - chunking_enabled = sched_configs[tests[0].full_id]['chunking']['size'] not in (0, None) + chunking_enabled = sched_configs[tests[0].id]['chunking']['size'] not in (0, None) # If the user really wants to use the same nodes even if other nodes are available, # setting share_allocation to max will allow that. use_same_nodes = True if sched_config['share_allocation'] == 'max' else False @@ -521,7 +521,7 @@ def _schedule_shared(self, pav_cfg, tests: List[TestRun], node_range: NodeRange, # At this point the scheduler config should be effectively identical # for the test being allocated. base_test = tests[0] - base_sched_config = sched_configs[base_test.full_id].copy() + base_sched_config = sched_configs[base_test.id].copy() # Get the longest time limit for all the tests. base_sched_config['time_limit'] = max(conf['time_limit'] for conf in sched_configs.values()) @@ -553,7 +553,7 @@ def _schedule_shared(self, pav_cfg, tests: List[TestRun], node_range: NodeRange, # Run each test via pavilion script.command('echo "Starting {} tests - $(date)"'.format(len(tests))) - script.command('pav _run {}'.format(" ".join(test.full_id for test in tests))) + script.command('pav _run {}'.format(" ".join(str(test.id) for test in tests))) script.write(job.kickoff_path) @@ -604,7 +604,7 @@ def _schedule_indi_flex(self, pav_cfg, tests: List[TestRun], prior_error=err, tests=[test])) continue - sched_config = sched_configs[test.full_id] + sched_config = sched_configs[test.id] node_range = calc_node_range(sched_config, len(chunk)) @@ -617,7 +617,7 @@ def _schedule_indi_flex(self, pav_cfg, tests: List[TestRun], node_range=node_range, shebang=test.shebang) - script.command('pav _run {t.full_id}'.format(t=test)) + script.command('pav _run {t.id}'.format(t=test)) script.write(job.kickoff_path) test.job = job @@ -664,7 +664,7 @@ def _schedule_indi_chunk(self, pav_cfg, tests: List[TestRun], # Figure out how many nodes each test needs and sort them least for test in tests: - sched_config = sched_configs[test.full_id] + sched_config = sched_configs[test.id] min_nodes, max_nodes = calc_node_range(sched_config, chunk_size) if max_nodes is None: @@ -682,7 +682,7 @@ def _schedule_indi_chunk(self, pav_cfg, tests: List[TestRun], prior_error=err, tests=[test])) continue - sched_config = sched_configs[test.full_id] + sched_config = sched_configs[test.id] if needed_nodes == 0: if needed_nodes > len(chunk_usage): @@ -712,7 +712,7 @@ def _schedule_indi_chunk(self, pav_cfg, tests: List[TestRun], nodes=picked_nodes, shebang=test.shebang) - script.command('pav _run {t.full_id}'.format(t=test)) + script.command('pav _run {t.id}'.format(t=test)) script.write(job.kickoff_path) test.job = job diff --git a/lib/pavilion/schedulers/basic.py b/lib/pavilion/schedulers/basic.py index 381023666..0fcf3b6cb 100644 --- 
a/lib/pavilion/schedulers/basic.py +++ b/lib/pavilion/schedulers/basic.py @@ -77,7 +77,7 @@ def schedule_tests(self, pav_cfg, tests: List[TestRun]) -> List[SchedulerPluginE job_share_key = self.gen_job_share_key(sched_config, node_range[0], node_range[1]) else: # If this scheduler doesn't support concurrency, just put every test in its own bin. - job_share_key = test.full_id + job_share_key = test.id job_bins[job_share_key].append(test) job_bin_sched_configs[job_share_key] = (node_range, sched_config) @@ -105,7 +105,7 @@ def schedule_tests(self, pav_cfg, tests: List[TestRun]) -> List[SchedulerPluginE node_range=node_range, shebang=test.shebang) - test_ids = ' '.join(test.full_id for test in tests) + test_ids = ' '.join(str(test.id) for test in tests) script.command('pav _run {}'.format(test_ids)) script.write(job.kickoff_path) diff --git a/lib/pavilion/schedulers/plugins/raw.py b/lib/pavilion/schedulers/plugins/raw.py index c50fd1c99..b7df4d6b0 100644 --- a/lib/pavilion/schedulers/plugins/raw.py +++ b/lib/pavilion/schedulers/plugins/raw.py @@ -6,6 +6,7 @@ import subprocess import time import uuid +import platform from pathlib import Path from typing import Union, List, Tuple @@ -66,23 +67,37 @@ def _get_alloc_node_info(self, node_name) -> NodeInfo: info = NodeInfo({}) - cpus = subprocess.check_output(['nproc']).strip().decode('utf8') - try: - info['cpus'] = int(cpus) - except ValueError: - pass - - with Path('/proc/meminfo').open() as meminfo_file: - for line in meminfo_file.readlines(): - if line.startswith('MemTotal:'): - parts = line.split() - if len(parts) > 2: - try: - info['mem'] = int(parts[1])//1024**2 - except ValueError: - pass - - break + if platform.system() == "Linux": + cpus = subprocess.check_output(['nproc']).strip().decode('utf8') + try: + info['cpus'] = int(cpus) + except ValueError: + pass + + with Path('/proc/meminfo').open() as meminfo_file: + for line in meminfo_file.readlines(): + if line.startswith('MemTotal:'): + parts = line.split() + if len(parts) > 2: + try: + info['mem'] = int(parts[1])//1024**2 + except ValueError: + pass + + break + elif platform.system() == "Darwin": + cpus = subprocess.check_output(['sysctl', '-n', 'hw.logicalcpu']).decode('utf8') + + try: + info['cpus'] = int(cpus) + except ValueError: + pass + + try: + total_bytes = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]).strip()) + info['mem'] = total_bytes // 1024**3 + except ValueError: + pass return info diff --git a/lib/pavilion/schedulers/scheduler.py b/lib/pavilion/schedulers/scheduler.py index ed08420b1..7dc108d5a 100644 --- a/lib/pavilion/schedulers/scheduler.py +++ b/lib/pavilion/schedulers/scheduler.py @@ -530,7 +530,7 @@ def deactivate(self): def _make_kickoff_error(self, orig_err, tests): """Convert a generic error to something with more information.""" - test_names = ['{} ({})'.format(test.full_id, test.name) for test in tests[:2]] + test_names = ['{} ({})'.format(test.id, test.name) for test in tests[:2]] if len(tests) > 2: test_names.append('...') test_names = ', '.join(test_names) diff --git a/lib/pavilion/series/__init__.py b/lib/pavilion/series/__init__.py index 6c10cf709..432899198 100644 --- a/lib/pavilion/series/__init__.py +++ b/lib/pavilion/series/__init__.py @@ -1,12 +1,13 @@ """Module init for series objects and related functions.""" import json -from typing import TextIO, Optional +from pathlib import Path +from typing import TextIO, Optional, List from pavilion import output from pavilion import utils, dir_db +from pavilion.config import PavConfig from 
pavilion.test_ids import SeriesID -from ..sys_vars import base_classes from ..errors import TestSeriesError, TestSeriesWarning from .info import SeriesInfo, path_to_sid, mk_series_info_transform, TestSetInfo, SeriesInfoBase from .series import TestSeries @@ -14,31 +15,7 @@ from .common import COMPLETE_FN, STATUS_FN, get_all_started -def load_user_series_id(pav_cfg, errfile=None) -> Optional[str]: - """Load the last series id used by the current user.""" - - user = utils.get_login() - last_series_fn = pav_cfg.working_dir/'users' - last_series_fn /= '{}.json'.format(user) - - sys_vars = base_classes.get_vars(True) - sys_name = sys_vars['sys_name'] - - if not last_series_fn.exists(): - return None - - try: - with last_series_fn.open() as last_series_file: - sys_name_series_dict = json.load(last_series_file) - return SeriesID(sys_name_series_dict[sys_name].strip()) - except (IOError, OSError, KeyError) as err: - if errfile: - output.fprint(errfile, "Failed to read series id file '{}'" - .format(last_series_fn), err) - return None - - -def list_series_tests(pav_cfg, sid: str): +def list_series_tests(pav_cfg, sid: SeriesID) -> List[Path]: """Return a list of paths to test run directories for the given series id. :raises TestSeriesError: If the series doesn't exist.""" @@ -63,21 +40,10 @@ def list_series_tests(pav_cfg, sid: str): return test_paths -def path_from_id(pav_cfg, sid: str): +def path_from_id(pav_cfg: PavConfig, sid: SeriesID) -> Path: """Return the path to the series directory given a series id (in the format 's[0-9]+'. :raises TestSeriesError: For an invalid id. """ - if not sid.startswith('s'): - raise TestSeriesError( - "Series id's must start with 's'. Got '{}'".format(sid)) - - try: - raw_id = int(sid[1:]) - except ValueError: - raise TestSeriesError( - "Invalid series id '{}'. Series id's must be in the format " - "s[0-9]+".format(sid)) - - return dir_db.make_id_path(pav_cfg.working_dir/'series', raw_id) + return dir_db.make_id_path(pav_cfg.working_dir/'series', sid.as_int()) diff --git a/lib/pavilion/series/common.py b/lib/pavilion/series/common.py index c47da404c..59bd4be01 100644 --- a/lib/pavilion/series/common.py +++ b/lib/pavilion/series/common.py @@ -12,6 +12,7 @@ from pavilion import status_file from pavilion.test_run import TestRun, TestAttributes from pavilion.types import ID_Pair +from pavilion.test_ids import TestID from ..errors import TestSeriesError COMPLETE_FN = 'SERIES_COMPLETE' @@ -91,10 +92,7 @@ def find_tests(self): if not path.is_symlink(): continue - try: - test_id = int(path.name) - except ValueError: - continue + test_id = TestID(path.resolve().name) try: working_dir = path.resolve().parents[1] @@ -265,6 +263,7 @@ def get_test_set_complete(pav_cfg: config.PavConfig, test_set_path: Path, if check_tests: latest = None + for test_path in dir_db.select(pav_cfg, test_set_path).paths: complete_ts = TestAttributes(test_path).complete_time diff --git a/lib/pavilion/series/info.py b/lib/pavilion/series/info.py index f80582525..06bd9484e 100644 --- a/lib/pavilion/series/info.py +++ b/lib/pavilion/series/info.py @@ -11,6 +11,7 @@ from pavilion import utils from pavilion.errors import TestRunError, TestSeriesError from pavilion.test_run import TestRun, TestAttributes +from pavilion.test_ids import SeriesID from . 
import common @@ -74,15 +75,9 @@ def attr_doc(cls, attr): return attr_prop.__doc__ @property - def sid(self): - """The sid of this series.""" - - return path_to_sid(self.path) - - @property - def id(self): # pylint: disable=invalid-name + def id(self) -> SeriesID: # pylint: disable=invalid-name """The id of this series.""" - return int(self.path.name) + return SeriesID(f"s{self.path.name}") @property def user(self): @@ -118,6 +113,7 @@ def passed(self) -> int: """Number of tests that have passed.""" passed = 0 + for test_path in self._tests: test_info = self.test_info(test_path) if test_info is None: @@ -346,17 +342,11 @@ def all_started(self): return common.get_all_started(self.path) @classmethod - def load(cls, pav_cfg: config.PavConfig, sid: str): + def load(cls, pav_cfg: config.PavConfig, sid: SeriesID) -> "SeriesInfo": """Find and load a series info object from a series id.""" - try: - id_ = int(sid[1:]) - except ValueError: - raise TestSeriesError( - "Invalid series id '{}'. Series id should " - "look like 's1234'.".format(sid)) + series_path = pav_cfg.working_dir/'series'/str(sid.as_int()) - series_path = pav_cfg.working_dir/'series'/str(id_) if not series_path.exists(): raise TestSeriesError("Could not find series '{}'".format(sid)) return cls(pav_cfg, series_path) diff --git a/lib/pavilion/series/series.py b/lib/pavilion/series/series.py index a3fa18426..25214d1d5 100644 --- a/lib/pavilion/series/series.py +++ b/lib/pavilion/series/series.py @@ -1,4 +1,5 @@ # pylint: disable=W0221 +# pylint: disable=invalid-name """Series are built around a config that specifies a 'series' of tests to run. It also tracks the tests that have run under it.""" import io @@ -17,6 +18,7 @@ from typing import List, Dict, Set, Union, TextIO, Iterator, Optional import pavilion +from pavilion.config import PavConfig from pavilion import cancel_utils from pavilion import config from pavilion import dir_db @@ -32,7 +34,9 @@ from pavilion.types import ID_Pair from pavilion.micro import partition, do, listfilter, stardo from pavilion.timing import TimeLimiter +from pavilion.test_ids import SeriesID from pavilion.result_logging import get_result_loggers +from pavilion.dir_db import create_id_dir from yaml_config import YAMLError, RequiredError from .info import SeriesInfo from .test_set import TestSet @@ -54,8 +58,9 @@ class TestSeries: PGID_FN = 'series.pgid' CANCEL_FN = 'series.CANCELED' NAME_RE = re.compile('[a-z][a-z0-9_-]+$') + TESTSET_DIRNAME = "test_sets" - def __init__(self, pav_cfg: config.PavConfig, series_cfg, _id=None, + def __init__(self, pav_cfg: config.PavConfig, series_cfg, _id: Optional[SeriesID] = None, verbosity: Verbose = Verbose.HIGH, outfile: TextIO = None, cancel_cooldown: float = 0.5): """Initialize the series. Test sets may be added via 'add_tests()'. @@ -102,12 +107,13 @@ def __init__(self, pav_cfg: config.PavConfig, series_cfg, _id=None, if _id is None: # Get the series id and path. try: - self._id, self.path = dir_db.create_id_dir(series_path) + _id, self.path = dir_db.create_id_dir(series_path) except (OSError, TimeoutError) as err: raise TestSeriesError( "Could not get id or series directory in '{}'" .format(series_path), err) + self.id = SeriesID.from_int(_id) # save series config self.save_config() @@ -119,12 +125,12 @@ def __init__(self, pav_cfg: config.PavConfig, series_cfg, _id=None, # We're not creating this from scratch (an object was made ahead of # time). 
else: - self._id = _id - self.path = dir_db.make_id_path(series_path, self._id) + self.id = _id + self.path = dir_db.make_id_path(series_path, self.id.as_int()) self.status = SeriesStatusFile(self.path/common.STATUS_FN) self.tests = common.LazyTestRunDict(pav_cfg, self.path) - self.result_loggers = get_result_loggers(pav_cfg, self.sid) + self.result_loggers = get_result_loggers(pav_cfg, self.id) self.log_proc = None def run_background(self): @@ -142,7 +148,7 @@ def run_background(self): env['PAV_CONFIG_FILE'] = pav_cfg.resolve() # start subprocess - temp_args = [pav_exe, '_series', self.sid] + temp_args = [pav_exe, '_series', str(self.id)] try: series_out_path = self.path/self.OUT_FN with series_out_path.open('w') as series_out: @@ -157,7 +163,7 @@ def run_background(self): except OSError as err: raise TestSeriesError("Could not start series '{}' in the background." - .format(self.sid), err) + .format(self.id), err) # write pgid to a file (atomically) series_pgid = os.getpgid(series_proc.pid) @@ -204,47 +210,19 @@ def save_config(self) -> None: def test_set_dirs(self) -> Iterator[Path]: """Return an iterator over the test set directories for this series.""" - if (self.path/'test_sets').exists(): - for dir in (self.path/'test_sets').iterdir(): + if (self.path/self.TESTSET_DIRNAME).exists(): + for dir in (self.path/self.TESTSET_DIRNAME).iterdir(): if dir.is_dir(): yield dir - @property - def sid(self): # pylint: disable=invalid-name - """Return the series id as a string, with an 's' in the front to -differentiate it from test ids.""" - - return 's{}'.format(self._id) - - @classmethod - def sid_to_id(cls, sid: str) -> int: - """Convert a sid string to a numeric series id. - - :raises TestSeriesError: On an invalid sid. - """ - - if not sid.startswith('s'): - raise TestSeriesError( - "Invalid SID '{}'. Must start with 's'.".format(sid)) - - try: - return int(sid[1:]) - except ValueError: - raise TestSeriesError( - "Invalid SID '{}'. Must end in an integer.".format(sid)) - @classmethod - def load(cls, pav_cfg, sid: Union[str, int], outfile=None): + def load(cls, pav_cfg: PavConfig, sid: SeriesID, outfile: TextIO = None) -> "TestSeries": """Load a series object from the given id, along with all of its associated tests. :raises TestSeriesError: From invalid series id or path.""" - if isinstance(sid, str): - series_id = cls.sid_to_id(sid) - else: - series_id = sid - + series_id = sid.as_int() series_path = pav_cfg.working_dir/'series' series_path = dir_db.make_id_path(series_path, series_id) @@ -265,7 +243,7 @@ def load(cls, pav_cfg, sid: Union[str, int], outfile=None): raise TestSeriesError("Could not load config file for test series '{}': {}" .format(sid), err) - series = cls(pav_cfg, _id=series_id, series_cfg=series_cfg, outfile=outfile) + series = cls(pav_cfg, _id=sid, series_cfg=series_cfg, outfile=outfile) return series def _create_test_sets(self, iteration=0): @@ -278,7 +256,7 @@ def _create_test_sets(self, iteration=0): "no test sets for a series, but this series has: {}" .format(self.test_sets)) - sets_path = self.path/'test_sets' + sets_path = self.path/self.TESTSET_DIRNAME sets_path.mkdir(exist_ok=True) # What each test depends on. 
@@ -449,12 +427,12 @@ def run(self, build_only: bool = False, rebuild: bool = False, if log_results: try: # Create a new process to log test results as tests complete - log_res_args = [pav_exe, '_log_results', self.sid] + log_res_args = [pav_exe, '_log_results', str(self.id)] self.log_proc = subprocess.Popen(log_res_args, start_new_session=True, env=env) except OSError as err: raise TestSeriesError( "Could not start result logger in the background for series '{}'." - .format(self.sid), err) + .format(self.id), err) # create the test sets and link together. try: @@ -502,12 +480,12 @@ def run(self, build_only: bool = False, rebuild: bool = False, self.status.set(SERIES_STATES.ERROR, "Error running test set {}. See the series log " "`pav log series {}`. {}" - .format(test_set.name, self.sid, err.args[0])) + .format(test_set.name, self.id, err.args[0])) self.set_complete() raise TestSeriesError( "Error making tests for series '{}'." - .format(self.sid), err) + .format(self.id), err) potential_sets = list(waiting_sets) @@ -582,11 +560,11 @@ def _run_set(self, test_set: TestSet, build_only: bool, rebuild: bool, local_bui except TestSetError as err: self.status.set(SERIES_STATES.BUILD_ERROR, "Error building tests. See the series log `pav log series {}" - .format(self.sid)) + .format(self.id)) self.set_complete() raise TestSeriesError( "Error building tests for series '{}'" - .format(self.sid), err) + .format(self.id), err) if not test_set.ready_to_start: continue @@ -599,14 +577,14 @@ def _run_set(self, test_set: TestSet, build_only: bool, rebuild: bool, local_bui tests_running += len(started_tests) except TestSetError as err: self.status.set(SERIES_STATES.KICKOFF_ERROR, - "Error kicking off tests for series '{}'".format(self.sid)) + "Error kicking off tests for series '{}'".format(self.id)) raise TestSeriesError( - "Error kicking off tests for series '{}'".format(self.sid)) + "Error kicking off tests for series '{}'".format(self.id)) if self.verbosity != Verbose.QUIET: if len(new_jobs) == 1: fprint(self.outfile, "Kicked off a job for test set '{}' in series {}." - .format(test_set.name, self.sid)) + .format(test_set.name, self.id)) else: ktests = ', '.join([test.name for test in started_tests[:3]] + ['...'] if len(started_tests) > 3 else []) @@ -614,7 +592,7 @@ def _run_set(self, test_set: TestSet, build_only: bool, rebuild: bool, local_bui fprint(self.outfile, "Kicked off tests {} ({} total) for test set {} " "in series {}." .format(ktests, len(started_tests), - test_set.name, self.sid)) + test_set.name, self.id)) # If simultaneous is set in the test_set, use that. _simultaneous = test_set.simultaneous if test_set.simultaneous else self.simultaneous # Wait for jobs until enough have finished to start a new batch. @@ -625,7 +603,7 @@ def _run_set(self, test_set: TestSet, build_only: bool, rebuild: bool, local_bui WAIT_INTERVAL = 0.5 - def wait(self, timeout: float = None) -> None: + def wait(self, timeout: Optional[float] = None) -> None: """Wait for the series to be complete or the timeout to expire. """ if timeout is None: @@ -640,15 +618,15 @@ def wait(self, timeout: float = None) -> None: time.sleep(self.WAIT_INTERVAL) raise TimeoutError("Series {} did not complete before timeout." 
- .format(self._id)) + .format(self.id)) - def wait_log(self, timeout: float = None) -> None: + def wait_log(self, timeout: Optional[float] = None) -> None: """Wait until the result logging process finishes.""" if self.log_proc is None: return - self.log_proc.wait() + self.log_proc.wait(timeout) @property def complete(self) -> bool: @@ -723,7 +701,7 @@ def add_test_set_config( if name in self.config['test_sets']: raise TestSeriesError("A test set called '{}' already exists in series {}" - .format(name, self.sid)) + .format(name, self.id)) self.config['test_sets'][name] = { 'tests': test_names, @@ -758,16 +736,18 @@ def _add_tests(self, tests, test_set_name): def _add_test(self, test_set_name: str, test: TestRun): """Add the given test to the series.""" - set_path = self.path/'test_sets'/test_set_name + set_path = self.path/self.TESTSET_DIRNAME/test_set_name try: set_path.mkdir(exist_ok=True, parents=True) except OSError as err: raise TestSeriesError( "Could not create test set directory {} under series {}." - .format(set_path, self.sid), err) + .format(set_path, self.id), err) # attempt to make symlink - link_path = dir_db.make_id_path(set_path, test.id) + _, link_path = create_id_dir(set_path, + link_target=test.path, + next_fn=self.path/self.TESTSET_DIRNAME/"next_id") self.tests[test.id_pair] = test @@ -804,13 +784,13 @@ def _save_series_id(self): # File was empty, therefore json couldn't be loaded. pass with json_file.open('w') as json_series_file: - data[sys_name] = self.sid + data[sys_name] = str(self.id) json_series_file.write(json.dumps(data)) except FileNotFoundError: # File hadn't been created yet. with json_file.open('w') as json_series_file: - data[sys_name] = self.sid + data[sys_name] = str(self.id) json_series_file.write(json.dumps(data)) def get_result_paths(self) -> List[Path]: diff --git a/lib/pavilion/series/test_set.py b/lib/pavilion/series/test_set.py index e7ab91a71..b36f362e7 100644 --- a/lib/pavilion/series/test_set.py +++ b/lib/pavilion/series/test_set.py @@ -285,7 +285,7 @@ def make_iter(self, build_only=False, rebuild=False, local_builds_only=False) \ if self.verbosity in (Verbose.HIGH, Verbose.MAX): output.fprint(self.outfile, 'Created and saved test run {} - {}' - .format(test_run.full_id, test_run.name)) + .format(test_run.id, test_run.name)) else: skip_count += 1 msg = "{} - {}" \ @@ -432,7 +432,7 @@ def build(self, deprecated_builds: Union[Set[str], None] = None, # Keep track of what the last message printed per build was. # This is for double build verbosity. - message_counts = {test.full_id: 0 for test in local_builds} + message_counts = {test.id: 0 for test in local_builds} # Used to track which threads are for which tests. test_by_threads = {} @@ -457,7 +457,7 @@ def build(self, deprecated_builds: Union[Set[str], None] = None, if self.verbosity in (Verbose.HIGH, Verbose.MAX): output.fprint(self.outfile, "Skipping build for test {} - prior attempts failed." - .format(test.full_id)) + .format(test.id)) test.status.set(STATES.BUILD_FAILED, "Build failed when being built for test {} (they " "share a build.".format(failed_builds[test.builder.name])) @@ -488,7 +488,7 @@ def build(self, deprecated_builds: Union[Set[str], None] = None, if not cancel_event.is_set(): built_tests.append(test) else: - failed_builds[test.builder.name] = test.full_id + failed_builds[test.builder.name] = test.id test.set_run_complete() # Output test status after joining a thread. 
@@ -498,7 +498,7 @@ def build(self, deprecated_builds: Union[Set[str], None] = None, when, state, msg = notes[-1] when = output.get_relative_timestamp(when) preamble = (self.BUILD_STATUS_PREAMBLE - .format(when=when, test_id=test.full_id, + .format(when=when, test_id=str(test.id), state_len=STATES.max_length, state=state)) output.fprint(self.outfile, preamble, msg, width=None, @@ -523,18 +523,18 @@ def build(self, deprecated_builds: Union[Set[str], None] = None, output.fprint(self.outfile, line, width=None, end='\r', clear=True) elif self.verbosity == Verbose.MAX: for test in local_builds: - seen = message_counts[test.full_id] + seen = message_counts[test.id] msgs = self.mb_tracker.get_notes(test.builder)[seen:] for when, state, msg in msgs: when = output.get_relative_timestamp(when) state = '' if state is None else state preamble = self.BUILD_STATUS_PREAMBLE.format( - when=when, test_id=test.id, + when=when, test_id=str(test.id), state_len=STATES.max_length, state=state) output.fprint(self.outfile, preamble, msg, width=None, wrap_indent=len(preamble)) - message_counts[test.full_id] += len(msgs) + message_counts[test.id] += len(msgs) time.sleep(self.BUILD_SLEEP_TIME) @@ -582,6 +582,7 @@ def kickoff(self) -> Tuple[List[TestRun], List[Job]]: self.status.set(S_STATES.SET_KICKOFF, "Kicking off {} tests under scheduler {}" .format(len(sched_tests), sched_name)) + sched_errors = scheduler.schedule_tests(self.pav_cfg, sched_tests) # Update the status of each test with any errors received from the scheduler. @@ -647,13 +648,10 @@ def _abort_builds(self, tests: List[TestRun]): "Tests with build errors:" ] - test_id = '' + test_id = None for tracker in self.mb_tracker.failures(): test = tracker.test - if test.full_id.startswith('main'): - test_id = str(test.id) - else: - test_id = test.full_id + test_id = test.id msg.append( " - {test} ({id} in test set '{set_name}')" diff --git a/lib/pavilion/status_utils.py b/lib/pavilion/status_utils.py index b24721e75..4a094913f 100644 --- a/lib/pavilion/status_utils.py +++ b/lib/pavilion/status_utils.py @@ -89,7 +89,7 @@ def status_from_test_obj(pav_cfg: dict, test: TestRun): 'result': result, 'series': series_id, 'state': status_f.state, - 'test_id': test.id if test.full_id.startswith('main') else test.full_id, + 'test_id': test.id, 'time': status_f.when, } @@ -113,7 +113,7 @@ def get_status(test: TestRun, pav_conf): 'result': '', 'series': '', 'state': STATES.UNKNOWN, - 'test_id': test.full_id, + 'test_id': test.id, 'time': '', } @@ -148,7 +148,6 @@ def print_status(statuses: List[dict], outfile, :return: success or failure. 
:rtype: int """ - if json: json_data = {'statuses': statuses} output.json_dump(json_data, outfile) @@ -171,7 +170,7 @@ def print_status(statuses: List[dict], outfile, 'time': { 'transform': output.get_relative_timestamp, 'title': 'Updated'}, - 'test_id': {'title': 'Test'}, + 'test_id': {'title': 'Test', 'transform': str}, }, fields=fields, rows=flat_sorted_statuses, diff --git a/lib/pavilion/sys_vars/host_arch.py b/lib/pavilion/sys_vars/host_arch.py index 286d0e0d3..2aff2255b 100644 --- a/lib/pavilion/sys_vars/host_arch.py +++ b/lib/pavilion/sys_vars/host_arch.py @@ -1,4 +1,4 @@ -import subprocess +import platform from .base_classes import SystemPlugin @@ -15,6 +15,4 @@ def __init__(self): def _get(self): """Base method for determining the host architecture.""" - out = subprocess.check_output(['uname', '-i']) - - return out.strip().decode('utf8') + return platform.machine() diff --git a/lib/pavilion/sys_vars/host_cpus.py b/lib/pavilion/sys_vars/host_cpus.py index dfa8aea13..9c53a1fca 100644 --- a/lib/pavilion/sys_vars/host_cpus.py +++ b/lib/pavilion/sys_vars/host_cpus.py @@ -1,4 +1,4 @@ -import subprocess +import os from .base_classes import SystemPlugin @@ -10,8 +10,7 @@ def __init__(self): description="The system processor count.", priority=self.PRIO_CORE) - def _get( self): + def _get(self): """Base method for determining the system processor count.""" - name = subprocess.check_output(['grep', '-c', r'^processor\s*:\s*\d*', '/proc/cpuinfo']) - return name.strip().decode('UTF-8') + return str(os.cpu_count()) diff --git a/lib/pavilion/sys_vars/host_os.py b/lib/pavilion/sys_vars/host_os.py index 7178d0c5e..a598fe696 100644 --- a/lib/pavilion/sys_vars/host_os.py +++ b/lib/pavilion/sys_vars/host_os.py @@ -1,3 +1,4 @@ +import platform from pathlib import Path from .base_classes import SystemPlugin @@ -14,15 +15,22 @@ def __init__(self): def _get(self): """Base method for determining the operating host and version.""" - with Path('/etc/os-release').open('r') as release: - rlines = release.readlines() - os_info = {} - for line in rlines: - if line[:3] == 'ID=': - os_info['name'] = line[3:].strip().strip('"') - elif line[:11] == 'VERSION_ID=': - os_info['version'] = line[11:].strip().strip('"') + if platform.system() == "Linux": + with Path('/etc/os-release').open('r') as release: + rlines = release.readlines() + + for line in rlines: + if line[:3] == 'ID=': + os_info['name'] = line[3:].strip().strip('"') + elif line[:11] == 'VERSION_ID=': + os_info['version'] = line[11:].strip().strip('"') + elif platform.system() == "Darwin": + os_info['name'] = 'macOS' + os_info['version'] = platform.mac_ver()[0] + elif platform.system() == "Windows": + os_info['name'] = 'Windows' + os_info['version'] = platform.version() return os_info diff --git a/lib/pavilion/sys_vars/platform.py b/lib/pavilion/sys_vars/platform.py index ddd536dd5..ed5e60052 100644 --- a/lib/pavilion/sys_vars/platform.py +++ b/lib/pavilion/sys_vars/platform.py @@ -1,4 +1,5 @@ from pathlib import Path +import platform from .base_classes import SystemPlugin @@ -13,16 +14,23 @@ def __init__(self): def _get(self): """Base method for determining the system platform.""" - with Path('/etc/os-release').open() as release: - rlines = release.readlines() - name = "" version = "" - for line in rlines: - if line[:3] == 'ID=': - name = line[3:].strip().strip('"') - elif line[:11] == 'VERSION_ID=': - version = line[11:].strip().strip('"') + if platform.system() == "Linux": + with Path('/etc/os-release').open() as release: + rlines = release.readlines() + 
+ for line in rlines: + if line[:3] == 'ID=': + name = line[3:].strip().strip('"') + elif line[:11] == 'VERSION_ID=': + version = line[11:].strip().strip('"') + elif platform.system() == "Darwin": + name = 'macOS' + version = platform.mac_ver()[0] + elif platform.system() == "Windows": + name = 'Windows' + version = platform.version() return f"{name}-{version}" diff --git a/lib/pavilion/sys_vars/sys_arch.py b/lib/pavilion/sys_vars/sys_arch.py index 21edefcb8..af1e0ff4f 100644 --- a/lib/pavilion/sys_vars/sys_arch.py +++ b/lib/pavilion/sys_vars/sys_arch.py @@ -1,4 +1,4 @@ -import subprocess +import platform from .base_classes import SystemPlugin @@ -13,5 +13,4 @@ def __init__( self ): def _get( self ): """Base method for determining the system architecture.""" - arch = subprocess.check_output(['uname', '-i']) - return arch.strip().decode('utf8') + return platform.machine() diff --git a/lib/pavilion/sys_vars/sys_os.py b/lib/pavilion/sys_vars/sys_os.py index f631fb4b8..e53200aec 100644 --- a/lib/pavilion/sys_vars/sys_os.py +++ b/lib/pavilion/sys_vars/sys_os.py @@ -1,4 +1,5 @@ from pathlib import Path +import platform from .base_classes import SystemPlugin @@ -13,15 +14,22 @@ def __init__(self): def _get(self): """Base method for determining the operating system and version.""" - with Path('/etc/os-release').open() as release: - rlines = release.readlines() - os_info = {} - for line in rlines: - if line[:3] == 'ID=': - os_info['name'] = line[3:].strip().strip('"') - elif line[:11] == 'VERSION_ID=': - os_info['version'] = line[11:].strip().strip('"') + if platform.system() == "Linux": + with Path('/etc/os-release').open() as release: + rlines = release.readlines() + + for line in rlines: + if line[:3] == 'ID=': + os_info['name'] = line[3:].strip().strip('"') + elif line[:11] == 'VERSION_ID=': + os_info['version'] = line[11:].strip().strip('"') + elif platform.system() == "Darwin": + os_info['name'] = 'macOS' + os_info['version'] = platform.mac_ver()[0] + elif platform.system() == "Windows": + os_info['name'] = 'Windows' + os_info['version'] = platform.version() return os_info diff --git a/lib/pavilion/test_ids.py b/lib/pavilion/test_ids.py index 6046604a2..6f2306a10 100644 --- a/lib/pavilion/test_ids.py +++ b/lib/pavilion/test_ids.py @@ -1,19 +1,32 @@ -from typing import Union, Tuple, List, Iterable, Optional +# pylint: disable=invalid-name + +import re +from typing import Union, Tuple, List, Iterable, Optional, Dict, Any, TextIO from abc import ABC, abstractmethod -from pavilion.micro import flatten, unique -from pavilion.utils import is_int +from pavilion.utils import is_int, is_hash +from pavilion.errors import PavilionError + + +HASH_LEN = 32 + + +class TestIDError(PavilionError): + """Error related to the manipulation and resolution of test IDs.""" class ID(ABC): """Base class for IDs""" def __init__(self, id_str: str): + if not self.is_valid_id(id_str): + raise ValueError(f"Invalid string {id_str} for type {self.__class__.__name__}.") + self.id_str = id_str - @staticmethod + @classmethod @abstractmethod - def is_valid_id(id_str: str) -> bool: + def is_valid_id(cls, id_str: str) -> bool: """Determine whether the given string constitutes a valid ID.""" raise NotImplementedError @@ -21,8 +34,22 @@ def is_valid_id(id_str: str) -> bool: def __str__(self) -> str: return self.id_str - def __eq__(self, other: "ID") -> bool: - return self.id_str == other.id_str + def __eq__(self, other: Any) -> bool: + if not hasattr(other, "id_str"): + return False + + return self.id_str.lower() == 
other.id_str.lower() + + @abstractmethod + def __gt__(self, other: "ID") -> bool: + raise NotImplementedError + + def __lt__(self, other: "ID") -> bool: + if not isinstance(other, self.__class__): + raise TypeError(f"Incompatible type for comparison with {self.__class__.__name__}: "\ + f"{type(other).__name__}.") + + return not (self > other or self == other) def __repr__(self) -> str: return f"{type(self).__name__}({self.id_str})" @@ -34,56 +61,69 @@ def __hash__(self) -> int: class TestID(ID): """Represents a single test ID.""" + def __init__(self, id_str: str): + super().__init__(id_str) + + parts = self.id_str.split('.', 1) + + if len(parts) == 2: + self.series = SeriesID(parts[0]) + self.id = int(parts[1]) + else: + self.series = None + + if is_int(parts[0]): + self.id = int(parts[0]) + else: + self.id = parts[0] + + self.parts = (self.series, self.id) + @classmethod def is_valid_id(cls, id_str: str) -> bool: """Determine whether the given string constitutes a valid test ID.""" - test_num = -1 + if is_hash(id_str, HASH_LEN) or is_int(id_str): + return True if "." in id_str: - test_num = int(id_str.split(".")[-1]) - elif is_int(id_str): - test_num = int(id_str) - - return test_num > 0 + series_id_str, num_str = id_str.split(".", 1) - def is_int(self) -> bool: - """Determine whether the test ID is an integer value.""" + test_num = -1 - return is_int(self.id_str) + if is_int(num_str): + test_num = int(num_str) - def as_int(self) -> int: - """Convert the test ID into an integer, if possible.""" - - try: - return int(self.id_str) - except: - raise ValueError(f"Test with ID {self.id_str} cannot be converted to an integer.") + return test_num >= 0 and SeriesID.is_valid_id(series_id_str) - @property - def parts(self) -> Tuple[str]: - """Return a tuple of components of the test ID, where components are separated by - periods.""" + return False - return tuple(self.id_str.split('.', 1)) + def is_absolute(self) -> bool: + """Returns true if the ID is absolute (i.e. 
not series-relative).""" - @property - def label(self) -> str: - """Return the config label component of the test ID.""" + return isinstance(self.id, str) - if len(self.parts) > 1: - return self.parts[0] + def is_relative(self) -> bool: + """Returns true if the ID is relative to a particular series.""" - return "main" + return not self.is_absolute() - @property - def test_num(self) -> Optional[int]: - """Return the test number component of the test ID.""" + def __gt__(self, other: "TestID") -> bool: + if not isinstance(other, self.__class__): + raise TypeError(f"Incompatible type for comparison with {self.__class__.__name__}: "\ + f"{type(other).__name__}.") - if self.is_int(): - return self.as_int() - elif len(self.parts) > 1: - return int(self.parts[-1]) + if self.is_absolute() and other.is_absolute(): + return int(self.id_str, 16) > int(other.id_str, 16) + elif self.is_relative() and other.is_relative(): + if self.series == other.series: + return int(self.id) > int(other.id) + else: + raise TypeError(f"Cannot compare test IDs {self} and {other} " + "from different series.") + else: + raise TypeError(f"Incompatible test ID formats for numerical comparison: "\ + f"{self} and {other}") class SeriesID(ID): @@ -93,22 +133,18 @@ class SeriesID(ID): def is_valid_id(cls, id_str: str) -> bool: """Determine whether the given string constitutes a valid series ID.""" - return cls.is_abstract_id(id_str) or (len(id_str) > 0 and id_str[0] == 's' \ + return id_str.lower() in ("last", "all") or (len(id_str) > 0 and id_str[0] == 's' \ and is_int(id_str[1:]) and int(id_str[1:]) > 0) - @staticmethod - def is_abstract_id(id_str: str) -> bool: - """Determine whether the given string is an abstract ID, that is, whether it - is 'last' or 'all'.""" + def is_abstract_id(self) -> bool: + """Return true if the ID is an abstract ID, that is, whether it is 'last' or 'all'.""" - return id_str.lower() in ("last", "all") + return self.all() or self.last() - @classmethod - def is_concrete_id(cls, id_str: str) -> bool: - """Determine whether the given string is a concrete ID, that is, whether it - is not 'last' or 'all'.""" + def is_concrete_id(self) -> bool: + """Return true if the ID is a concrete ID, that is, whether it is not 'last' or 'all'.""" - return cls.is_valid_id(id_str) and not cls.is_abstract_id(id_str) + return not self.is_abstract_id() def all(self) -> bool: """Determine whether the ID is the set of all IDs.""" @@ -120,45 +156,55 @@ def last(self) -> bool: return self.id_str.lower() == "last" - def is_int(self) -> bool: - """Determine whether the series ID is an integer value.""" - - return len(self.id_str) > 0 and is_int(self.id_str[1:]) - def as_int(self) -> int: """Convert the series ID into an integer, if possible.""" - if self.all() or self.last(): - raise ValueError(f"Series with ID {self.id_str} cannot be converted to an integer.") + if self.is_abstract_id(): + raise ValueError(f"Abstract series '{self}' cannot be converted to an integer.") return int(self.id_str[1:]) + @classmethod + def from_int(cls, id: int) -> "SeriesID": + """Create a new SeriesID from an int.""" + + return cls(f"s{id}") + + def __gt__(self, other: "SeriesID"): + if not isinstance(other, self.__class__): + raise TypeError(f"Incompatible type for comparison with {self.__class__.__name__}: "\ + f"{type(other).__name__}.") + + return self.as_int() > other.as_int() -class GroupID: +class GroupID(ID): """Represents a single group ID.""" - def __init__(self, id_str: str): - self.id_str = id_str + GROUP_NAME_RE = 
re.compile(r'^[a-zA-Z][a-zA-Z0-9_-]+$') - @staticmethod - def is_valid_id(id_str: str) -> bool: + @classmethod + def is_valid_id(cls, id_str: str) -> bool: """Determine whether the given string constitutes a valid group ID.""" - return len(id_str) > 0 and not (TestID.is_valid_id(id_str) or SeriesID.is_valid_id(id_str)) - - def __str__(self) -> str: - return self.id_str + return not (TestID.is_valid_id(id_str) or \ + SeriesID.is_valid_id(id_str) or \ + TestRange.is_valid_range_str(id_str) or \ + SeriesRange.is_valid_range_str(id_str)) and \ + cls.GROUP_NAME_RE.match(id_str) is not None - def __eq__(self, other: "GroupID") -> bool: - return self.id_str == other.id_str + def __gt__(self, other: "GroupID") -> bool: + if not isinstance(other, self.__class__): + raise TypeError(f"Incompatible type for comparison with {self.__class__.__name__}: "\ + f"{type(other).__name__}.") - def __repr__(self) -> str: - return f"{type(self).__name__}({self.id_str})" + return self.id_str > other.id_str class IDRange(ABC): """Represents a contiguous sequence of IDs.""" def __init__(self, start: int, end: int): + if start > end: + raise ValueError(f"End value {end} must be greater than or equal to {start}.") self.start = start self.end = end @@ -198,6 +244,9 @@ def __str__(self) -> str: def __repr__(self) -> str: return f"{type(self).__name__}({self.start}, {self.end})" + def __len__(self) -> int: + return self.end - self.start + 1 + class TestRange(IDRange): """Represents a contiguous sequence of test IDs.""" @@ -276,38 +325,32 @@ def expand(self) -> List[SeriesID]: def __str__(self) -> str: return f"s{self.start}-s{self.end}" - -def multi_convert(id_str: str) -> Union[List[TestID], List[SeriesID], List[GroupID]]: - """Convert a string into a list (possibly a singleton list) of either a TestID, SeriesID, - or GroupID as appropriate.""" - - if id_str.lower() == "all": - return [SeriesID("all")] - if id_str.lower() == "last": - return [SeriesID("last")] - - if TestRange.is_valid_range_str(id_str): - return list(TestRange.from_str(id_str).expand()) - if SeriesRange.is_valid_range_str(id_str): - return list(SeriesRange.from_str(id_str).expand()) - if TestID.is_valid_id(id_str): - return [TestID(id_str)] - if SeriesID.is_valid_id(id_str): - return [SeriesID(id_str)] - - return [GroupID(id_str)] - - def resolve_mixed_ids(ids: Iterable[str], - auto_last: bool = True) -> List[Union[TestID, SeriesID, GroupID]]: + auto_last: bool = True) -> Dict[str, List[ID]]: """Fully resolve all IDs in the given list into either test IDs, series IDs, or group IDs.""" + id_dict = {"tests": [], "series": [], "groups": []} + ids = list(ids) if auto_last and len(ids) == 0: - return [SeriesID("last")] + id_dict["series"].append(SeriesID("last")) if "all" in ids: - return [SeriesID("all")] - - return list(flatten(map(multi_convert, ids))) + id_dict["series"].append(SeriesID("all")) + + return id_dict + + for id_str in ids: + if TestID.is_valid_id(id_str): + id_dict["tests"].append(TestID(id_str)) + elif SeriesID.is_valid_id(id_str): + id_dict["series"].append(SeriesID(id_str)) + elif GroupID.is_valid_id(id_str): + id_dict["groups"].append(GroupID(id_str)) + elif TestRange.is_valid_range_str(id_str): + id_dict["tests"].extend(TestRange.from_str(id_str).expand()) + elif SeriesRange.is_valid_range_str(id_str): + id_dict["series"].extend(SeriesRange.from_str(id_str).expand()) + + return id_dict diff --git a/lib/pavilion/test_run/test_attrs.py b/lib/pavilion/test_run/test_attrs.py index 79af9bfc7..1b0c52fa2 100644 --- 
a/lib/pavilion/test_run/test_attrs.py +++ b/lib/pavilion/test_run/test_attrs.py @@ -8,6 +8,7 @@ from pavilion.config import DEFAULT_CONFIG_LABEL from pavilion.errors import TestRunError from pavilion.status_file import TestStatusInfo, TestStatusFile, STATES +from pavilion.test_ids import TestID # pylint: disable=protected-access @@ -47,6 +48,7 @@ class TestAttributes(Mapping): """ serializers = { + "id": str, "status": lambda s: s.path.as_posix(), 'suite_path': lambda p: p.as_posix(), } @@ -54,6 +56,7 @@ class TestAttributes(Mapping): deserializers = { 'created': utils.deserialize_datetime, 'finished': utils.deserialize_datetime, + 'id': TestID, 'started': utils.deserialize_datetime, "status": lambda s: TestStatusFile(Path(s)), 'suite_path': lambda p: Path(p) if p is not None else None, @@ -153,7 +156,7 @@ def load_legacy_attributes(self, initial_attrs=None): 'build_name': None, 'created': self.path.stat().st_mtime, 'finished': self.path.stat().st_mtime, - 'id': int(self.path.name), + 'id': self.path.name, 'rebuild': False, 'result': None, 'skipped': None, @@ -311,7 +314,7 @@ def results(self): 'name': self.name, 'sys_name': self.sys_name, 'created': self.created, - 'id': self.full_id, + 'id': str(self.id), 'result': None, } else: @@ -323,17 +326,6 @@ def result(self): return self.results.get('result', None) - @property - def full_id(self): - """The test full id, which is the config label it was created under - and the test id. The default config label is omitted.""" - # If the cfg label is actually something that exists, use it in the - # test full_id. Otherwise give the test path. - if self.cfg_label == DEFAULT_CONFIG_LABEL or self.cfg_label is None: - return '{}'.format(self.id) - else: - return '{}.{}'.format(self.cfg_label, self.id) - @property def state(self) -> Optional[TestStatusInfo]: """Returns the current state of the test.""" @@ -442,4 +434,4 @@ def test_run_attr_transform(path): """A dir_db transformer to convert a test_run path into a dict of test attributes.""" - return TestAttributes(path).attr_dict(serialize=True) + return TestAttributes(path).attr_dict() diff --git a/lib/pavilion/test_run/test_run.py b/lib/pavilion/test_run/test_run.py index a3b5eec6c..4b1f9b80a 100644 --- a/lib/pavilion/test_run/test_run.py +++ b/lib/pavilion/test_run/test_run.py @@ -3,6 +3,7 @@ # pylint: disable=too-many-lines # pylint: disable=W0221 +# pylint: disable=invalid-name import copy import json import logging @@ -15,7 +16,7 @@ import uuid import os from pathlib import Path -from typing import TextIO, Union, Dict, Optional, List +from typing import Any, TextIO, Union, Dict, Optional, List import yc_yaml as yaml from pavilion.config import PavConfig @@ -40,6 +41,8 @@ from pavilion.types import ID_Pair from pavilion.micro import get_nested, consume from pavilion.timing import wait +from pavilion.test_ids import TestID, SeriesID +from pavilion.id_utils import resolve_relative_id from .test_attrs import TestAttributes @@ -65,8 +68,6 @@ class TestRun(TestAttributes): 5. Results are gathered. -- ``test.gather_results()`` :ivar int ~.id: The test id number. - :ivar str ~.full_id: The full test id number, including the config label. This - may also be a string path to the test itself. :ivar str cfg_label: The config label for the configuration directory that defined this test. This is ephemeral, and may change between Pavilion invocations based on available configurations. 
@@ -100,8 +101,10 @@ class TestRun(TestAttributes): BUILD_TEMPLATE_DIR = 'templates' """Directory that holds build templates.""" - def __init__(self, pav_cfg: PavConfig, config: Dict, var_man: VariableSetManager = None, - _id: int = None, rebuild: bool = False, build_only: bool = False): + def __init__(self, pav_cfg: PavConfig, config: Dict[str, Any], + var_man: Optional[VariableSetManager] = None, _id: Optional[TestID] = None, + series_id: Optional[SeriesID] = None, rebuild: bool = False, + build_only: bool = False): """Create an new TestRun object. If loading an existing test instance, use the ``TestRun.from_id()`` method. @@ -128,19 +131,26 @@ def __init__(self, pav_cfg: PavConfig, config: Dict, var_man: VariableSetManager self.working_dir = Path(config['working_dir']) tests_path = self.working_dir/self.RUN_DIR + series_path = self.working_dir / "series" / series_id if series_id else None self.config = config self._validate_config() + test_uuid = uuid.uuid4().hex + # Get an id for the test, if we weren't given one. if new_test: + uuid_path = tests_path / test_uuid + uuid_path.mkdir() # These will be set by save() or on load. try: - id_tmp, run_path = dir_db.create_id_dir(tests_path) + if series_path is not None: + _, series_test_path = dir_db.create_id_dir(series_path, link_target=uuid_path) except (OSError, TimeoutError) as err: raise TestRunError("Could not create test id directory at '{}'" .format(tests_path), err) - super().__init__(path=run_path, load=False) + super().__init__(path=uuid_path, load=False) + self.id = TestID(test_uuid) self._variables_path = self.path / 'variables' self.var_man = None self.status = None @@ -148,7 +158,6 @@ def __init__(self, pav_cfg: PavConfig, config: Dict, var_man: VariableSetManager self.build_name = None # Set basic attributes - self.id = id_tmp # pylint: disable=invalid-name self.build_only = build_only self._complete = False self.created = time.time() @@ -163,14 +172,15 @@ def __init__(self, pav_cfg: PavConfig, config: Dict, var_man: VariableSetManager self.suite_path = Path(suite_path) self.user = utils.get_login() - self.uuid = str(uuid.uuid4()) if var_man is None: var_man = VariableSetManager() self.var_man = var_man else: + uuid_path = tests_path / str(_id) # Load the test info from the given id path. - super().__init__(path=dir_db.make_id_path(tests_path, _id)) + super().__init__(path=uuid_path) + self.id = _id if not self.path.is_dir(): raise TestRunNotFoundError( "No test with id '{}' could be found.".format(self.id)) @@ -185,6 +195,7 @@ def __init__(self, pav_cfg: PavConfig, config: Dict, var_man: VariableSetManager raise TestRunError("Error loading variable set for test {}".format(self.id), err) + self.uuid = test_uuid self.sys_name = self.var_man.get('sys_name', '') self.test_version = config.get('test_version') @@ -208,7 +219,7 @@ def __init__(self, pav_cfg: PavConfig, config: Dict, var_man: VariableSetManager raise ValueError() except ValueError: raise TestRunError("The run.concurrent test config key must be a positive integer. 
" - "Test '{}' got '{}'".format(self.full_id, self.concurrent)) + "Test '{}' got '{}'".format(self.id, self.concurrent)) self.run_log = self.path/'run.log' self.build_log = self.path/'build.log' @@ -266,7 +277,7 @@ def suite_name(self) -> Optional[str]: @property def id_pair(self) -> ID_Pair: """Returns an ID_pair (a tuple of the working dir and test id).""" - return ID_Pair((self.working_dir, self.id)) + return ID_Pair((self.working_dir, self.uuid)) @property def series(self) -> Union[str, None]: @@ -422,55 +433,38 @@ def _validate_config(self): "being defined in the pavilion config.") @classmethod - def parse_raw_id(cls, pav_cfg, raw_test_id: str) -> ID_Pair: + def parse_raw_id(cls, pav_cfg: PavConfig, test_id: TestID, legacy: bool = False) -> ID_Pair: """Parse a raw test run id and return the label, working_dir, and id for that test. The test run need not exist, but the label must.""" - parts = raw_test_id.split('.', 1) - if not parts: - raise TestRunNotFoundError("Blank test run id given") - elif len(parts) == 1: - cfg_label = 'main' - test_id = parts[0] - else: - cfg_label, test_id = parts - - try: - test_id = int(test_id) - except ValueError: - raise TestRunNotFoundError("Invalid test id with label '{}': '{}'" - .format(cfg_label, test_id)) - - if cfg_label not in pav_cfg.configs: - raise TestRunNotFoundError( - "Invalid test label: '{}', label not found. Valid labels are {}" - .format(cfg_label, tuple(pav_cfg.configs.keys()))) - - working_dir = pav_cfg.configs[cfg_label]['working_dir'] - - return ID_Pair((working_dir, test_id)) + return ID_Pair((pav_cfg.working_dir, test_id)) @classmethod - def load_from_raw_id(cls, pav_cfg, raw_test_id: str) -> 'TestRun': + def load_from_raw_id(cls, pav_cfg: PavConfig, raw_test_id: TestID, + legacy: bool = False) -> 'TestRun': """Load a test given a raw test id string, in the form [label].test_id. The optional label will allow us to look up the config path for the test.""" - working_dir, test_id = cls.parse_raw_id(pav_cfg, raw_test_id) + working_dir, test_id = cls.parse_raw_id(pav_cfg, raw_test_id, legacy) - return cls.load(pav_cfg, working_dir, test_id) + return cls.load(pav_cfg, working_dir, test_id, legacy) @classmethod - def load(cls, pav_cfg, working_dir: Path, test_id: int) -> 'TestRun': + def load(cls, pav_cfg, working_dir: Path, test_id: TestID, legacy: bool = False) -> 'TestRun': """Load an old TestRun object given a test id. :param pav_cfg: The pavilion config :param working_dir: The working directory where this test run lives. - :param int test_id: The test's id number. + :param test_id: The test's id number. + :param legacy: Whether or not to treat the ID as a legacy ID. :rtype: TestRun """ - path = dir_db.make_id_path(working_dir / cls.RUN_DIR, test_id) + if test_id.is_relative() and not legacy: + test_id = resolve_relative_id(pav_cfg, working_dir, test_id) + + path = dir_db.make_id_path(working_dir / cls.RUN_DIR, test_id.id) if not path.is_dir(): raise TestRunError("Test directory for test id {} does not exist " @@ -498,7 +492,7 @@ def finalize(self, new_vars: VariableSetManager): if not self.saved: raise RuntimeError("You must call the 'test.save()' method before " - "you can finalize a test. Test: {}".format(self.full_id)) + "you can finalize a test. Test: {}".format(self.id)) self._save_config() # Save our newly updated variables. 
@@ -509,7 +503,7 @@ def finalize(self, new_vars: VariableSetManager): create_files.create_file(file, self.build_path, contents) except TestConfigError as err: raise TestRunError("Test run '{}' Could not create build script." - .format(self.full_id), err) + .format(self.id), err) for tmpl_src, tmpl_dest in self.config['run'].get('templates', {}).items(): try: @@ -517,7 +511,7 @@ def finalize(self, new_vars: VariableSetManager): create_files.create_file(tmpl_dest, self.build_path, tmpl, newlines='') except TestConfigError as err: raise TestRunError("Test run '{}' could not create run script." - .format(self.full_id, err)) + .format(self.id, err)) self.save_attributes() @@ -551,7 +545,7 @@ def run_cmd(self): pav_path = self._pav_cfg.pav_root/'bin'/'pav' - return '{} run {}'.format(pav_path, self.full_id) + return '{} run {}'.format(pav_path, self.id) def _save_config(self): """Save the configuration for this test to the test config file.""" @@ -668,7 +662,7 @@ def build(self, cancel_event=None, tracker: BuildTracker = None): if not self.saved: raise RuntimeError("The .save() method must be called before you " - "can build test '{}'".format(self.full_id)) + "can build test '{}'".format(self.id)) if self.build_origin_path.exists(): raise RuntimeError( @@ -684,7 +678,7 @@ def build(self, cancel_event=None, tracker: BuildTracker = None): # evaluated to true return True - if self.builder.build(self.full_id, tracker=tracker, + if self.builder.build(self.id, tracker=tracker, cancel_event=cancel_event): # Create the build origin path, to make tracking a test's build # a bit easier. @@ -695,7 +689,7 @@ def build(self, cancel_event=None, tracker: BuildTracker = None): try: if not built_by_path.exists(): with built_by_path.open('w') as built_by: - built_by.write(str(self.full_id)) + built_by.write(str(self.id)) built_by_path.chmod(0o440) except OSError as err: tracker.warn("Could not create built_by file: {}".format(err.args), @@ -751,7 +745,7 @@ def run(self): if not self.saved: raise RuntimeError("You must call the .save() method before running " - "test {}".format(self.full_id)) + "test {}".format(self.id)) if self.build_only: self.status.set( @@ -774,7 +768,7 @@ def run(self): run_wd = self.build_path.as_posix() # Run scripts take the test id as a first argument. 
- cmd = [self.run_script_path.as_posix(), self.full_id] + cmd = [self.run_script_path.as_posix(), str(self.id)] proc = subprocess.Popen(cmd, cwd=run_wd, stdout=run_log, @@ -873,7 +867,7 @@ def set_run_complete(self) -> None: if not self.saved: raise RuntimeError("You must call the .save() method before run {} " - "can be marked complete.".format(self.full_id)) + "can be marked complete.".format(self.id)) complete_path = self.path/self.COMPLETE_FN @@ -925,7 +919,7 @@ def wait(self, timeout=None): if timeout is not None and time.time() > timeout: raise TimeoutError("Timed out waiting for test '{}' to " - "complete".format(self.full_id)) + "complete".format(self.id)) def gather_results(self, run_result: int, regather: bool = False, log_file: TextIO = None): @@ -1042,7 +1036,7 @@ def save_results(self, results: Dict) -> None: if not self.saved: raise RuntimeError("You must call the .save() method before saving " - "results for test {}".format(self.full_id)) + "results for test {}".format(self.id)) results_tmp_path = self.results_path.with_suffix('.tmp') with results_tmp_path.open('w') as results_file: @@ -1249,7 +1243,7 @@ def _write_script(self, stype: str, path: Path, config: dict, module_wrappers: d script.write(path) def __repr__(self): - return "TestRun({s.name}-{s.full_id})".format(s=self) + return "TestRun({s.name}-{s.id})".format(s=self) def _get_permute_vars(self): """Return the permute var values in a dictionary.""" diff --git a/lib/pavilion/types.py b/lib/pavilion/types.py index f9177eec2..2c2853942 100644 --- a/lib/pavilion/types.py +++ b/lib/pavilion/types.py @@ -1,8 +1,10 @@ from pathlib import Path from typing import NewType, Tuple, Dict, Any, List, FrozenSet, Union +from pavilion.test_ids import TestID + # pylint: disable=invalid-name -ID_Pair = NewType('ID_Pair', Tuple[Path, int]) +ID_Pair = NewType('ID_Pair', Tuple[Path, TestID]) NodeInfo = NewType('NodeInfo', Dict[str, Any]) Nodes = NewType('Nodes', Dict[str, NodeInfo]) NodeList = NewType('NodeList', List[str]) diff --git a/lib/pavilion/unittest.py b/lib/pavilion/unittest.py index 686fb1bc2..d7e7e976b 100644 --- a/lib/pavilion/unittest.py +++ b/lib/pavilion/unittest.py @@ -353,6 +353,7 @@ def _quick_test(self, cfg=None, name="quick_test", cfg = resolve.test_config(cfg, var_man) test = TestRun(pav_cfg=self.pav_cfg, config=cfg, var_man=var_man) + if test.skipped: # You can't proceed further with a skipped test. 
return test @@ -368,6 +369,7 @@ def _quick_test(self, cfg=None, name="quick_test", fin_sched_vars = sched.get_final_vars(test) fin_var_man.add_var_set('sched', fin_sched_vars) test.finalize(fin_var_man) + return test def wait_tests(self, working_dir: Path, timeout=5): diff --git a/lib/pavilion/utils.py b/lib/pavilion/utils.py index 877384803..a13d65e38 100644 --- a/lib/pavilion/utils.py +++ b/lib/pavilion/utils.py @@ -70,6 +70,16 @@ def is_int(val: str): return True +def is_hash(val: str, hash_len: int) -> bool: + """Return true if the given string value is a hexidecimal hash.""" + + try: + int(val, 16) + except ValueError: + return False + + return len(val) == hash_len + def str_bool(val): """Returns true if the string value is the string 'true' with allowances for capitalization.""" diff --git a/lib/pavilion/variables.py b/lib/pavilion/variables.py index 7f62eb8af..b03355617 100644 --- a/lib/pavilion/variables.py +++ b/lib/pavilion/variables.py @@ -634,7 +634,7 @@ def save(self, path): except (OSError, IOError, FileNotFoundError) as err: raise VariableError( "Could not write variable file at '{}'" - .format(tmp_path), err) + .format(tmp_path), prior_error=err) start = time.time() while time.time() - start < 100: @@ -656,7 +656,7 @@ def load(cls, path): data = json.load(stream) except (json.decoder.JSONDecodeError, IOError, FileNotFoundError) \ as err: - raise VariableError("Could not load variable file '{}'".format(path), err) + raise VariableError("Could not load variable file '{}'".format(path), prior_error=err) var_man = cls() diff --git a/test/data/modules/test_mod1/1.10 b/test/data/modules/test_mod1/1.1 similarity index 100% rename from test/data/modules/test_mod1/1.10 rename to test/data/modules/test_mod1/1.1 diff --git a/test/tests/builder_tests.py b/test/tests/builder_tests.py index bff2f2d48..262b2d80e 100644 --- a/test/tests/builder_tests.py +++ b/test/tests/builder_tests.py @@ -40,7 +40,7 @@ def test_build_locking(self): for test in tests: tracker = mb_tracker.register(test) - thread = threading.Thread(target=test.builder.build, args=(test.full_id, tracker)) + thread = threading.Thread(target=test.builder.build, args=(test.id, tracker)) threads.append(thread) thread.run() diff --git a/test/tests/cancel_cmd_tests.py b/test/tests/cancel_cmd_tests.py index 405468a3b..b656b7fdb 100644 --- a/test/tests/cancel_cmd_tests.py +++ b/test/tests/cancel_cmd_tests.py @@ -2,12 +2,13 @@ import errno import sys +import uuid -import pavilion.series from pavilion import arguments from pavilion import commands from pavilion import plugins from pavilion.status_utils import get_statuses +from pavilion.id_utils import load_user_series_id from pavilion.unittest import PavTestCase @@ -49,7 +50,7 @@ def test_cancel_invalid_test(self): args = arg_parser.parse_args([ 'cancel', - 'test.{}'.format(sys.maxsize) + str(uuid.uuid4().hex) ]) cancel_cmd = commands.get_command(args.command_name) @@ -77,7 +78,7 @@ def test_cancel_series(self): tests = [] - series_id = pavilion.series.load_user_series_id(self.pav_cfg) + series_id = load_user_series_id(self.pav_cfg) tests.append(series_id) args = arg_parser.parse_args([ diff --git a/test/tests/cancel_tests.py b/test/tests/cancel_tests.py index b0aa9e735..c8fbf7403 100644 --- a/test/tests/cancel_tests.py +++ b/test/tests/cancel_tests.py @@ -4,6 +4,7 @@ from pavilion import schedulers from pavilion import unittest from pavilion.status_file import STATES +from pavilion.timing import wait class CancelTests(unittest.PavTestCase): @@ -15,7 +16,7 @@ def 
test_cancel_jobs(self): test_cfg = self._quick_test_cfg() test_cfg['run']['cmds'] = ['sleep 5'] test_cfg['scheduler'] = 'dummy' - test_cfg['schedule'] = {'nodes': 'all'} + test_cfg['schedule'] = {'nodes': 'all'} test1 = self._quick_test(test_cfg, finalize=False) test2 = self._quick_test(test_cfg, finalize=False) @@ -27,11 +28,8 @@ def test_cancel_jobs(self): test1.cancel("For fun") # Wait till we know test2 is running - while not test1.complete: - time.sleep(0.1) - - while not test2.status.has_state(STATES.RUNNING): - time.sleep(0.1) + wait(lambda: test1.complete, interval=0.2, timeout=10) + wait(lambda: test2.status.has_state(STATES.RUNNING), interval=0.2, timeout=10) jobs = cancel_utils.cancel_jobs(self.pav_cfg, [test1, test2]) self.assertEqual(test2.status.current().state, STATES.RUNNING) diff --git a/test/tests/cat_tests.py b/test/tests/cat_tests.py index 6df21ba86..d8b33810a 100644 --- a/test/tests/cat_tests.py +++ b/test/tests/cat_tests.py @@ -17,7 +17,7 @@ def test_cat(self): cat_cmd.outfile = cat_cmd.errfile = io.StringIO() arg_parser = arguments.get_parser() - arg_sets = (['cat', test.full_id, 'run.tmpl'],) + arg_sets = (['cat', str(test.id), 'run.tmpl'],) for arg_set in arg_sets: args = arg_parser.parse_args(arg_set) cat_cmd.run(self.pav_cfg, args) diff --git a/test/tests/cmd_util_tests.py b/test/tests/cmd_util_tests.py index 395332a4d..5b0220f78 100644 --- a/test/tests/cmd_util_tests.py +++ b/test/tests/cmd_util_tests.py @@ -27,7 +27,7 @@ def test_load_last_series(self): last_series = cmd_utils.load_last_series(self.pav_cfg, io.StringIO()) - self.assertEqual(last_series.sid, run_cmd.last_series.sid) + self.assertEqual(last_series.id, run_cmd.last_series.id) def test_arg_filtered_tests(self): """Make sure basic requests for tests work.""" @@ -47,19 +47,27 @@ def test_arg_filtered_tests(self): # This just loads the arguments for the status command. commands.get_command('status') - tests1 = [test.full_id for test in series1.tests.values()] + tests1 = [str(test.id) for test in series1.tests.values()] for argset, count in [ - (('status', series1.sid, series2.sid), 6), - (('status', '{}-{}'.format(series1.sid, series2.sid)), 6), + (('status', str(series1.id), str(series2.id)), 6), + (('status', '{}-{}'.format(series1.id, series2.id)), 6), (('status', 'all', '--filter', 'name=arg_filtered.*'), 3), (('status', ) + tuple(tests1), 3), ]: args = arguments.get_parser().parse_args(argset) - args.tests = resolve_mixed_ids(args.tests) - - self.assertEqual(len(cmd_utils.arg_filtered_tests(self.pav_cfg, args).paths), count) + ids = resolve_mixed_ids(args.tests) + tests = ids["tests"] + series = ids["series"] + + self.assertEqual(len(cmd_utils.arg_filtered_tests( + self.pav_cfg, + tests, + series, + filter_query=args.filter, + sort_by=args.sort_by, + limit=args.limit).paths), count) # TODO: We really need to add unit tests for each of the cmd utils functions. 
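Note: the cmd_util_tests.py hunk above exercises the reworked resolve_mixed_ids(), which now returns a dict keyed by "tests", "series", and "groups" rather than a flat list. Below is a minimal usage sketch, assuming the test_ids module as defined earlier in this diff; the ID strings are illustrative only, and the "1-2" entry assumes TestRange.is_valid_range_str() accepts the same "N-M" form used in test_ids_tests.py.

    from pavilion.test_ids import resolve_mixed_ids

    # Group a mixed batch of raw ID strings by kind.
    ids = resolve_mixed_ids(["s2.1", "s7", "mygroup", "1-2"])
    ids["tests"]   # [TestID("s2.1"), TestID("1"), TestID("2")]
    ids["series"]  # [SeriesID("s7")]
    ids["groups"]  # [GroupID("mygroup")]

    # With no IDs and auto_last=True (the default), the result contains
    # only SeriesID("last"), under the "series" key.
    resolve_mixed_ids([])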
diff --git a/test/tests/dir_db_tests.py b/test/tests/dir_db_tests.py index ab123e053..e401b129e 100644 --- a/test/tests/dir_db_tests.py +++ b/test/tests/dir_db_tests.py @@ -95,7 +95,7 @@ def _make_entry(self, index_path, id_, complete=True, d=0): 'd': d, 'complete': complete} - key = str(id_) + key = hex(id_)[2:] path = index_path / key path.mkdir(exist_ok=True) with (path / 'data').open('w') as data_file: diff --git a/test/tests/filter_tests.py b/test/tests/filter_tests.py index 845e2e44e..b9a167f0e 100644 --- a/test/tests/filter_tests.py +++ b/test/tests/filter_tests.py @@ -251,7 +251,7 @@ def test_filter_states(self): t_filter = filters.parse_query("state=RUN_DONE") t_filter2 = filters.parse_query("has_state=RUNNING") - agg1 = test + agg1 = test self.assertFalse(t_filter(agg1)) @@ -308,6 +308,7 @@ def test_get_sort_opts(self): paths=paths, transform=test_run_attr_transform, order_func=sort, order_asc=ascending).data + self.assertEqual([t['id'] for t in sorted_tests], ids) # And descending. @@ -323,9 +324,9 @@ def test_get_sort_opts(self): def test_error_on_bad_query(self): with self.assertRaises(FilterParseError): test_filter = filters.parse_query("garbage") - + def test_validators(self): - + @validate_int def ret_int(_): return 42 @@ -349,7 +350,7 @@ def ret_datetime(_): self.assertTrue(ret_int(None, "=", "42")) self.assertFalse(ret_int(None, "=", "40")) self.assertTrue(ret_int(None, ">=", "0")) - + with self.assertRaises(FilterParseError): ret_int(None, "!", "57") @@ -373,9 +374,9 @@ def ret_datetime(_): self.assertTrue(ret_str_list(None, "=", "CHESS")) self.assertFalse(ret_str_list(None, "=", "parcheesi")) - + with self.assertRaises(FilterParseError): - ret_str_list(None, "💩", "glass") + ret_str_list(None, "💩", "glass") self.assertTrue(ret_datetime(None, ">", "1945-09-06")) self.assertFalse(ret_datetime(None, "<", "1945-11-11T11:00")) @@ -399,7 +400,7 @@ def test_filter_boolean_logic(self): """Test that the filter's three-valued logic works as expected (as specified by Paul). 
See transformer.py for detailed specification.""" - + test_dict = { 'name': None, 'user': 'Batman', diff --git a/test/tests/general_tests.py b/test/tests/general_tests.py index 09e5aae37..49db75c7e 100644 --- a/test/tests/general_tests.py +++ b/test/tests/general_tests.py @@ -9,6 +9,7 @@ import yc_yaml as yaml from pavilion.test_run import TestRun from pavilion import utils +from pavilion.test_ids import TestID from pavilion.unittest import PavTestCase @@ -121,14 +122,14 @@ def test_legacy_runs(self): shutil.copytree(run_path.as_posix(), dst_path.as_posix(), symlinks=True) - run_id = 'test.{}'.format(run) + run_id = TestID(run) # Move the build directory into place build_dst = Path(os.readlink((run_path/'build_origin').as_posix())) build_dst = dst_path/build_dst (dst_path/'build_dir').rename(build_dst) - test = TestRun.load_from_raw_id(self.pav_cfg, run_id) + test = TestRun.load_from_raw_id(self.pav_cfg, run_id, legacy=True) self.assertTrue(test.results) self.assertTrue(test.complete) diff --git a/test/tests/graph_cmd_tests.py b/test/tests/graph_cmd_tests.py index 3f5906451..ca65a6a7a 100644 --- a/test/tests/graph_cmd_tests.py +++ b/test/tests/graph_cmd_tests.py @@ -2,6 +2,7 @@ from pavilion import arguments from pavilion import commands +from pavilion.test_ids import resolve_mixed_ids from pavilion.unittest import PavTestCase @@ -156,6 +157,8 @@ def test_graph_cmd(self): '-o', '/tmp/foo.png', '--x', 'id', '--y', 'id', - ] + [test.full_id for test in tests]) + ] + [str(test.id) for test in tests]) + + args.series = [] cmd.run(self.pav_cfg, args) diff --git a/test/tests/group_tests.py b/test/tests/group_tests.py index 08399c824..a759a1335 100644 --- a/test/tests/group_tests.py +++ b/test/tests/group_tests.py @@ -6,6 +6,7 @@ from pavilion.errors import TestGroupError from pavilion.series_config import generate_series_config from pavilion.test_run import TestRun +from pavilion.test_ids import GroupID, TestID import shutil import uuid @@ -18,7 +19,7 @@ def _make_group_name(self): _ = self - return 'grp_' + uuid.uuid4().hex[:10] + return GroupID('grp_' + uuid.uuid4().hex[:10]) def _make_example(self): """Make an example group, and a tuple of a test, series, and sub-group.""" @@ -30,7 +31,7 @@ def _make_example(self): series1 = series.TestSeries(self.pav_cfg, series_cfg) series1._add_tests([tr2], 'bob') sub_group = groups.TestGroup(self.pav_cfg, self._make_group_name()) - self.assertEqual(sub_group.add([tr3]), ([('test', tr3.full_id)], [])) + self.assertEqual(sub_group.add([tr3]), ([tr3.id], [])) group = groups.TestGroup(self.pav_cfg, self._make_group_name()) @@ -40,33 +41,16 @@ def assertGroupContentsEqual(self, test_group, items): """Verify that the group's contents match the given items ((itype, name) tuples).""" members = [] for mem in test_group.members(): - members.append((mem['itype'], mem['id'])) + members.append(mem['id']) item_tuples = [] for item in items: if isinstance(item, groups.TestGroup): - item_tuples.append(('group', item.name)) - elif isinstance(item, series.TestSeries): - item_tuples.append(('series', item.sid)) + item_tuples.append(item.name) else: - item_tuples.append(('test', item.full_id)) + item_tuples.append(item.id) - members.sort() - item_tuples.sort() - self.assertEqual(members, item_tuples) - - def test_group_init(self): - """Check that object initialization and basic status functions work.""" - - group = groups.TestGroup(self.pav_cfg, 'init_test_group') - - self.assertFalse(group.exists()) - group.create() - self.assertTrue(group.exists()) - - for bad_name in 
('s123', '-as3', '327bb', 'a b'): - with self.assertRaisesRegex(TestGroupError, r'Invalid group name'): - group = groups.TestGroup(self.pav_cfg, bad_name) # Bad group name. + self.assertEqual(set(members), set(item_tuples)) def test_member_info(self): """Check that member info gathering works the same if given an object or a string.""" @@ -74,8 +58,8 @@ def test_member_info(self): group, (test, series1, sub_group) = self._make_example() for obj, str_rep in ( - (test, test.full_id), - (series1, series1.sid), + (test, test.id), + (series1, series1.id), (sub_group, sub_group.name)): self.assertEqual(group._get_member_info(obj), group._get_member_info(str_rep)) @@ -87,9 +71,9 @@ def test_group_add(self): test, series1, sub_group = items added, errors = group.add(items) self.assertEqual(errors, []) - added_answer = [('test', test.full_id), - ('series', series1.sid), - ('group', sub_group.name)] + added_answer = [test.id, + series1.id, + sub_group.name] added2, errors = group.add(items) self.assertEqual(errors, []) self.assertEqual(added2, []) @@ -115,18 +99,18 @@ def test_group_remove(self): # Remove a single item, to make sure other items are preserved removed, errors = group.remove([series1]) self.assertEqual(errors, []) - self.assertEqual(removed, [('series', series1.sid)]) + self.assertEqual(removed, [series1.id]) self.assertGroupContentsEqual(group, [test, sub_group]) # Remove multiple items. removed, errors = group.remove([test, sub_group]) self.assertEqual(errors, []) - self.assertEqual(removed, [('test', test.full_id), ('group', sub_group.name)]) + self.assertEqual(removed, [test.id, sub_group.name]) self.assertGroupContentsEqual(group, []) - removed, errors = group.remove(['nope', 'a.1', 'test.982349842', 's1234981234']) + removed, errors = group.remove([GroupID('nope')]) self.assertEqual(removed, []) - self.assertEqual(len(errors), 4) + self.assertEqual(len(errors), 1) def test_group_exclusions(self): """Check that excluded tests are handled properly.""" @@ -138,23 +122,20 @@ def test_group_exclusions(self): s_test = list(series1.tests.values())[0] g_test = sub_group.tests()[0] g_test = g_test.resolve() - g_test = TestRun.load(self.pav_cfg, g_test.parents[1], int(g_test.name)) + g_test = TestRun.load(self.pav_cfg, g_test.parents[1], TestID(g_test.name)) removed, warnings = group.remove([g_test, s_test]) self.assertEqual(warnings, []) - removed.sort() - answer = sorted([(group.EXCL_ITYPE, s_test.full_id), - (group.EXCL_ITYPE, g_test.full_id)]) - self.assertEqual(removed, answer) - self.assertEqual(group._excluded(), {s_test.full_id: s_test.path, - g_test.full_id: g_test.path}) + answer = [s_test.id, g_test.id] + self.assertEqual(set(removed), set(answer)) + self.assertEqual(group._excluded(), {s_test.id: s_test.path, + g_test.id: g_test.path}) self.assertEqual(group.tests(), [btest.path]) group.remove([sub_group.name]) added, warnings = group.add([s_test, g_test]) - self.assertEqual(sorted(added), [('test', g_test.full_id), - ('test*', s_test.full_id)]) + self.assertEqual(sorted(added), sorted([g_test.id, s_test.id])) self.assertEqual(warnings, []) def test_group_clean(self): @@ -186,19 +167,19 @@ def test_group_rename(self): new_name = self._make_group_name() sub_group.rename(new_name) self.assertEqual(sub_group.name, new_name) - self.assertEqual(sub_group.path.name, new_name) + self.assertEqual(GroupID(sub_group.path.name), new_name) self.assertTrue(sub_group.exists()) - self.assertIn(('group', new_name), group.member_tuples()) - self.assertNotIn(('group', old_name), 
group.member_tuples()) + self.assertIn(new_name, group) + self.assertNotIn(old_name, group) new_name2 = self._make_group_name() sub_group.rename(new_name2, redirect_parents=False) self.assertEqual(sub_group.name, new_name2) - self.assertEqual(sub_group.path.name, new_name2) + self.assertEqual(GroupID(sub_group.path.name), new_name2) self.assertTrue(sub_group.exists()) # The group doesn't exist under the old renaming, and we didn't rename it. - self.assertIn(('group', new_name), group.member_tuples()) - self.assertNotIn(('group', new_name2), group.member_tuples()) + self.assertIn(new_name, group) + self.assertNotIn(new_name2, group) def test_group_commands(self): """Check the operation of various group command statements.""" @@ -214,8 +195,8 @@ def test_group_commands(self): parser = arguments.get_parser() # Start a series of tests two ways, each assigned to a group. - run_args = parser.parse_args(['run', '-g', group_name, 'hello_world']) - series_args = parser.parse_args(['series', 'run', '-g', group_name, 'basic']) + run_args = parser.parse_args(['run', '-g', str(group_name), 'hello_world']) + series_args = parser.parse_args(['series', 'run', '-g', str(group_name), 'basic']) run_cmd.run(self.pav_cfg, run_args) series_cmd.run(self.pav_cfg, series_args) @@ -234,11 +215,11 @@ def test_group_commands(self): # Create a new group with tests to add sub_group_name = self._make_group_name() - run_args3 = parser.parse_args(['run', '-g', sub_group_name, 'hello_world']) + run_args3 = parser.parse_args(['run', '-g', str(sub_group_name), 'hello_world']) run_cmd.run(self.pav_cfg, run_args3) run_cmd.last_series.wait(timeout=10) - add_items = [sub_group_name] + [test.full_id for test in run_cmd.last_tests] + add_items = [str(sub_group_name)] + [str(test.id) for test in run_cmd.last_tests] rm_tests = add_items[1:3] def run_grp_cmd(args): @@ -249,35 +230,38 @@ def run_grp_cmd(args): members = group.members() # Add tests and a group via commands - run_grp_cmd(['group', 'add', group_name] + add_items) + + run_grp_cmd(['group', 'add', str(group_name)] + add_items) self.assertEqual(len(group.tests()), 10) # Remove a couple tests - run_grp_cmd(['group', 'remove', group_name] + rm_tests) + run_grp_cmd(['group', 'remove', str(group_name)] + rm_tests) self.assertEqual(len(group.tests()), 8) # Rename the subgroup new_name1 = self._make_group_name() new_name2 = self._make_group_name() - run_grp_cmd(['group', 'rename', sub_group_name, new_name1]) + run_grp_cmd(['group', 'rename', str(sub_group_name), str(new_name1)]) self.assertEqual(len(group.tests()), 8) - run_grp_cmd(['group', 'rename', '--no-redirect', new_name1, new_name2]) + run_grp_cmd(['group', 'rename', '--no-redirect', str(new_name1), str(new_name2)]) self.assertEqual(len(group.tests()), 5) - run_grp_cmd(['group', 'rename', new_name2, new_name1]) + run_grp_cmd(['group', 'rename', str(new_name2), str(new_name1)]) self.assertEqual(len(group.tests()), 8) # Try all the list options for rows, args in [ - (7, ['group', 'members', group_name]), - (4, ['group', 'members', '--tests', group_name]), - (5, ['group', 'members', '--series', group_name]), - (4, ['group', 'members', '--groups', group_name]), - (7, ['group', 'members', '--tests', '--series', '--groups', group_name]), - (8, ['group', 'members', '--recursive', group_name]), + (7, ['group', 'members', str(group_name)]), + (4, ['group', 'members', '--tests', str(group_name)]), + (5, ['group', 'members', '--series', str(group_name)]), + (4, ['group', 'members', '--groups', str(group_name)]), + (7, ['group', 
'members', '--tests', '--series', '--groups', str(group_name)]), + (8, ['group', 'members', '--recursive', str(group_name)]), ]: run_grp_cmd(args) out, err_out = group_cmd.clear_output() - self.assertEqual(len(out.split('\n')), rows, + self.assertEqual( + len(out.split('\n')), + rows, msg="unexpected lines for {}:\n{}" .format(args, out)) @@ -289,5 +273,5 @@ def run_grp_cmd(args): # Delete the renamed sub-group - run_grp_cmd(['group', 'delete', new_name1]) + run_grp_cmd(['group', 'delete', str(new_name1)]) self.assertEqual(len(group.tests()), 5) diff --git a/test/tests/list_cmd_tests.py b/test/tests/list_cmd_tests.py index 4611559ed..d3656fada 100644 --- a/test/tests/list_cmd_tests.py +++ b/test/tests/list_cmd_tests.py @@ -5,6 +5,7 @@ from pavilion import commands from pavilion.series.series import TestSeries from pavilion.test_run import TestAttributes +from pavilion.test_ids import resolve_mixed_ids from pavilion.unittest import PavTestCase @@ -42,11 +43,12 @@ def test_list_cmd(self): args = parser.parse_args(['list', 'test_runs', '--limit=15', '--filter', 'name=*.list_cmd_tests_*']) + self.assertEqual(cmd.run(self.pav_cfg, args), 0) out, err = cmd.clear_output() self.assertEqual(err, '') - self.assertEqual([int(t) for t in out.split()], - [t.id for t in tests[:15]]) + self.assertEqual([t for t in out.split()], + [str(t.id) for t in tests[:15]]) args = parser.parse_args( ['list', '--multi-line', 'test_runs', '--sort-by=created', @@ -54,8 +56,8 @@ def test_list_cmd(self): self.assertEqual(cmd.run(self.pav_cfg, args), 0) out, err = cmd.clear_output() # 26-30 are filtered due to the default newer-than time. - self.assertEqual([int(t) for t in out.strip().splitlines()], - [t.id for t in list(reversed(tests))][:15]) + self.assertEqual([t for t in out.strip().splitlines()], + [str(t.id) for t in list(reversed(tests))][:15]) all_out_fields = ','.join(TestAttributes.list_attrs()) args = parser.parse_args( @@ -68,11 +70,11 @@ def test_list_cmd(self): id_idx = TestAttributes.list_attrs().index('id') for line in lines: parts = [part.strip() for part in line.split('|')] - ids.append(int(parts[id_idx])) + ids.append(parts[id_idx]) # 26-30 are filtered due to the default newer-than time. 
self.assertEqual(ids, - [t.id for t in tests if t.complete]) + [str(t.id) for t in tests if t.complete]) args = parser.parse_args( ['list', '--csv', '--out-fields={}'.format(all_out_fields), @@ -82,7 +84,7 @@ def test_list_cmd(self): rows = [line.split(",") for line in out.strip().splitlines()] ids = [int(row[id_idx]) for row in rows] self.assertEqual(ids, - [t.id for t in tests if (t.result == t.PASS)]) + [str(t.id) for t in tests if (t.result == t.PASS)]) for arglist in [ ['list', '--long', '--header', '--vsep=$', 'runs'], diff --git a/test/tests/log_cmd_tests.py b/test/tests/log_cmd_tests.py index 33d55b171..a0a03c4e1 100644 --- a/test/tests/log_cmd_tests.py +++ b/test/tests/log_cmd_tests.py @@ -30,7 +30,7 @@ def test_log_arguments(self): time.sleep(.1) # test `pav log run test` - args = parser.parse_args(['run', test.full_id]) + args = parser.parse_args(['run', str(test.id)]) result = log_cmd.run(self.pav_cfg, args) out, err = log_cmd.clear_output() self.assertEqual(err, '') @@ -39,7 +39,7 @@ def test_log_arguments(self): # test `pav log build test` # note: echo-ing hello world should not require anything to be built - args = parser.parse_args(['build', test.full_id]) + args = parser.parse_args(['build', str(test.id)]) log_cmd.run(self.pav_cfg, args) out, err = log_cmd.clear_output() out_data = '\n'.join(line for line in out.split('\n') @@ -49,7 +49,7 @@ def test_log_arguments(self): # test `pav log kickoff test` # note: in general, kickoff.log should be an empty file - args = parser.parse_args(['kickoff', test.full_id]) + args = parser.parse_args(['kickoff', str(test.id)]) result = log_cmd.run(self.pav_cfg, args) out, err = log_cmd.clear_output() self.assertEqual(out, '') @@ -89,7 +89,7 @@ def test_log_tail(self): while not test.complete and time.time() < end: time.sleep(.1) - args = parser.parse_args(['--tail', '3', 'run', test.full_id]) + args = parser.parse_args(['--tail', '3', 'run', str(test.id)]) result = log_cmd.run(self.pav_cfg, args) self.assertEqual(result, 0) out, err = log_cmd.clear_output() @@ -109,7 +109,7 @@ def test_follow(self): parser = argparse.ArgumentParser() log_cmd._setup_arguments(parser) - args = parser.parse_args(['--follow', 'run', test.full_id]) + args = parser.parse_args(['--follow', 'run', str(test.id)]) thread = threading.Thread(target=log_cmd.run, args=(self.pav_cfg, args)) thread.start() time.sleep(.2) @@ -133,10 +133,10 @@ def test_log_states(self): log_cmd._setup_arguments(parser) for args in ( - ('states', test.full_id), - ('states', '--raw', test.full_id), - ('states', '--raw_time', test.full_id), - ('states', '--raw', '--raw_time', test.full_id), + ('states', str(test.id)), + ('states', '--raw', str(test.id)), + ('states', '--raw_time', str(test.id)), + ('states', '--raw', '--raw_time', str(test.id)), ): args = parser.parse_args(args) self.assertEqual(log_cmd.run(self.pav_cfg, args), 0) diff --git a/test/tests/logging_tests.py b/test/tests/logging_tests.py index acef1b43d..ddfa80424 100644 --- a/test/tests/logging_tests.py +++ b/test/tests/logging_tests.py @@ -156,7 +156,7 @@ def test_series_file_logger(self): series.log_results() log_path = next(iter(series.get_result_paths()), None) - self.assertEqual(log_path.stem, series.sid) + self.assertEqual(log_path.stem, str(series.id)) with open(log_path) as fin: results = json.load(fin) @@ -175,6 +175,7 @@ def test_common_file_logger(self): ]) run_cmd = commands.get_command(args.command_name) + run_cmd.silence() self.pav_cfg["result_loggers"] = [{ "plugin": "common_file", @@ -183,6 +184,7 @@ def 
test_common_file_logger(self): self.assertEqual(run_cmd.run(self.pav_cfg, args, log_results=False), 0) series1 = run_cmd.last_series + series1.log_results() log_path = next(iter(series1.get_result_paths()), None) diff --git a/test/tests/mod_wrapper_tests.py b/test/tests/mod_wrapper_tests.py index 5918bc57b..90036a2cb 100644 --- a/test/tests/mod_wrapper_tests.py +++ b/test/tests/mod_wrapper_tests.py @@ -104,7 +104,7 @@ def test_add_module(self): test_cfg['run']['modules'] = [ '', # A blank module 'test_mod1/1.0', - 'test_mod1/1.10', + 'test_mod1/1.1', 'test_mod1', # Should load 1.1 as the default. 'test_mod2', # Un-versioned. ] @@ -117,8 +117,8 @@ def test_add_module(self): # test_mod1 only gets added once (no dups) '[[ "${mods_sorted}" == "test_mod1:test_mod2" ]] || exit 1', # test_mod2 has no version (but the module file appends it anyway.) - '[[ "${vers_sorted}" == "1.0:1.10:" ]] || ' - '[[ "${vers_sorted}" == "1.10::" ]] || exit 1' + '[[ "${vers_sorted}" == "1.0:1.1:" ]] || ' + '[[ "${vers_sorted}" == "1.1::" ]] || exit 1' ] test = self._quick_test(test_cfg) diff --git a/test/tests/result_tests.py b/test/tests/result_tests.py index cbf9aeb3e..a4e9d9f0f 100644 --- a/test/tests/result_tests.py +++ b/test/tests/result_tests.py @@ -819,14 +819,14 @@ def test_result_command(self): test.wait(10) res_args = arg_parser.parse_args( - ('result', '--full') + tuple(t.full_id for t in run_cmd.last_tests)) + ('result', '--full') + tuple(str(t.id) for t in run_cmd.last_tests)) if result_cmd.run(self.pav_cfg, res_args) != 0: cmd_out, cmd_err = result_cmd.clear_output() self.fail("Result command failed: \n{}\n{}" .format(cmd_out, cmd_err)) res_args = arg_parser.parse_args( - ('result',) + tuple(t.full_id for t in run_cmd.last_tests)) + ('result',) + tuple(str(t.id) for t in run_cmd.last_tests)) if result_cmd.run(self.pav_cfg, res_args) != 0: cmd_out, cmd_err = result_cmd.clear_output() self.fail("Result command failed: \n{}\n{}" @@ -834,7 +834,7 @@ def test_result_command(self): for test in run_cmd.last_tests: # Each of these tests should have a 'FAIL' as the result. - self.assertEqual(test.results['result'], TestRun.FAIL, + self.assertEqual(test.results['result'], TestRun.FAIL, msg='Should be FAIL {}'.format(test.results)) # Make sure we can re-run results, even with permutations. @@ -842,10 +842,11 @@ def test_result_command(self): result_cmd.clear_output() res_args = arg_parser.parse_args( ('result', '--re-run', '--json') + - tuple(t.full_id for t in run_cmd.last_tests)) + tuple(str(t.id) for t in run_cmd.last_tests)) result_cmd.run(rerun_cfg, res_args) data, err = result_cmd.clear_output() + results = json.loads(data) results = {res['name']: res for res in results} @@ -867,7 +868,7 @@ def test_result_command(self): # Make sure the log argument doesn't blow up. 
res_args = arg_parser.parse_args( - ('result', '--show-log') + (run_cmd.last_tests[0].full_id,)) + ('result', '--show-log') + (str(run_cmd.last_tests[0].id),)) if result_cmd.run(self.pav_cfg, res_args) != 0: cmd_out, cmd_err = result_cmd.clear_output() self.fail("Result command failed: \n{}\n{}" @@ -880,10 +881,10 @@ def test_result_command(self): test_cfg['build']['cmds'] = ['false'] bad_test = self._quick_test(test_cfg) res_args = arg_parser.parse_args( - ('result', '--re-run', bad_test.full_id)) + ('result', '--re-run', str(bad_test.id))) self.assertEqual(result_cmd.run(self.pav_cfg, res_args), 0) out, err = result_cmd.clear_output() - self.assertIn(bad_test.full_id, err) + self.assertIn(str(bad_test.id), err) def test_result_cmd_by_key(self): """Check the by-key and by-key-compat options.""" @@ -901,9 +902,9 @@ def test_result_cmd_by_key(self): self.fail("Run command failed: \n{}\n{}".format(cmd_out, cmd_err)) for test in run_cmd.last_tests: test.wait(10) - + res_args = arg_parser.parse_args( - ('result', '--by-key-compat', run_cmd.last_tests[0].full_id)) + ('result', '--by-key-compat', str(run_cmd.last_tests[0].id))) rslt = result_cmd.run(self.pav_cfg, res_args) cmd_out, cmd_err = result_cmd.clear_output() self.assertEqual(rslt, 0, "Result command failed: \n{}\n{}" @@ -912,7 +913,7 @@ def test_result_cmd_by_key(self): self.assertIn('data', cmd_out) res_args = arg_parser.parse_args( - ('result', '--by-key=data', run_cmd.last_tests[0].full_id)) + ('result', '--by-key=data', str(run_cmd.last_tests[0].id))) rslt = result_cmd.run(self.pav_cfg, res_args) cmd_out, cmd_err = result_cmd.clear_output() self.assertEqual(rslt, 0, "Result command failed: \n{}\n{}" @@ -950,16 +951,16 @@ def test_result_cmd_all_passed(self): rslts = bad_rslts.gather_results(bad_rslts.run()) bad_rslts.save_results(rslts) - args = arg_parser.parse_args(['result', '--all-passed', good.full_id]) + args = arg_parser.parse_args(['result', '--all-passed', str(good.id)]) self.assertEqual(rslts_cmd.run(self.pav_cfg, args), 0) - args = arg_parser.parse_args(['result', '--all-passed', good.full_id, bad_run.full_id]) + args = arg_parser.parse_args(['result', '--all-passed', str(good.id), str(bad_run.id)]) self.assertEqual(rslts_cmd.run(self.pav_cfg, args), 1) - args = arg_parser.parse_args(['result', '--all-passed', good.full_id, bad_build.full_id]) + args = arg_parser.parse_args(['result', '--all-passed', str(good.id), str(bad_build.id)]) self.assertEqual(rslts_cmd.run(self.pav_cfg, args), 1) - args = arg_parser.parse_args(['result', '--all-passed', good.full_id, bad_rslts.full_id]) + args = arg_parser.parse_args(['result', '--all-passed', str(good.id), str(bad_rslts.id)]) self.assertEqual(rslts_cmd.run(self.pav_cfg, args), 1) def test_re_search(self): @@ -1087,11 +1088,11 @@ def test_flatten_results(self): series1 = run_cmd.last_series - loggers = get_result_loggers(self.pav_cfg, series1.sid) + loggers = get_result_loggers(self.pav_cfg, series1.id) series1.log_results(loggers) - series1.wait() - series1.wait_log() + series1.wait(10) + series1.wait_log(10) result_log1 = series1.get_result_paths()[0] @@ -1123,7 +1124,7 @@ def test_flatten_results(self): series2 = run_cmd.last_series - loggers = get_result_loggers(self.pav_cfg, series2.sid) + loggers = get_result_loggers(self.pav_cfg, str(series2.id)) series2.log_results(loggers) series2.wait() diff --git a/test/tests/series_cmd_tests.py b/test/tests/series_cmd_tests.py index ef2df6f04..d72023129 100644 --- a/test/tests/series_cmd_tests.py +++ b/test/tests/series_cmd_tests.py @@ 
-114,7 +114,7 @@ def test_cancel_series(self): ser = series_cmd.last_run_series self._wait_for_all_start(ser) - cancel_args = arg_parser.parse_args(['series', 'cancel', series_cmd.last_run_series.sid]) + cancel_args = arg_parser.parse_args(['series', 'cancel', str(series_cmd.last_run_series.id)]) cancel_result = series_cmd.run(self.pav_cfg, cancel_args) self.assertEqual(cancel_result, 0) self.assertEqual(ser.status.current().state, SERIES_STATES.CANCELED) @@ -129,7 +129,7 @@ def test_series_sets(self): args = arg_parser.parse_args(['series', 'run', 'multi']) self.assertEqual(series_cmd.run(self.pav_cfg, args), 0) series_cmd.last_run_series.wait(timeout=10) - sid = series_cmd.last_run_series.sid + sid = str(series_cmd.last_run_series.id) arg_lists = [ ['series', 'sets', sid], @@ -161,7 +161,7 @@ def test_series_list(self): list_args = [ ['series', 'list'], - ['series', 'ls', series_cmd.last_run_series.sid], + ['series', 'ls', str(series_cmd.last_run_series.id)], ['series', 'status', 'all'], ] for raw_args in list_args: @@ -186,7 +186,7 @@ def test_series_history(self): list_args = [ ['series', 'state_history', '--text'], - ['series', 'states', series_cmd.last_run_series.sid], + ['series', 'states', str(series_cmd.last_run_series.id)], ] for raw_args in list_args: args = arg_parser.parse_args(raw_args) diff --git a/test/tests/series_tests.py b/test/tests/series_tests.py index f35084b1a..3b5eed993 100644 --- a/test/tests/series_tests.py +++ b/test/tests/series_tests.py @@ -8,6 +8,19 @@ from pavilion.unittest import PavTestCase +def durations_overlap(durations): + """Return true if any of the given test durations overlap with one another.""" + + durations.sort() + + for i in range(len(durations) - 1): + for j in range(i + 1, len(durations)): + if durations[j][0] < durations[i][1]: + return True + + return False + + class SeriesTests(PavTestCase): def test_init(self): @@ -24,7 +37,7 @@ def test_init(self): # Add a basic test set and save. 
series1.add_test_set_config('series1', ['pass_fail']) - series2 = series.TestSeries.load(self.pav_cfg, series1.sid) + series2 = series.TestSeries.load(self.pav_cfg, series1.id) # Make sure a loaded series is the same as the original for attr in series1.__dict__.keys(): @@ -104,13 +117,16 @@ def test_series_simultaneous(self): test_series_obj.wait(timeout=10) last_ended = None - for test_id in sorted(test_series_obj.tests): + + durations = [] + + for test_id in test_series_obj.tests: test_obj = test_series_obj.tests[test_id] started = test_obj.results['started'] ended = test_obj.results['finished'] - if last_ended is not None: - self.assertLessEqual(last_ended, started) - last_ended = ended + durations.append((started, ended)) + + self.assertFalse(durations_overlap(durations)) def test_series_test_set_simultaneous(self): """Test to see if simultaneous in the test_set overrides the simultaneous at full series""" @@ -131,14 +147,15 @@ def test_series_test_set_simultaneous(self): test_series_obj.run() test_series_obj.wait(timeout=10) - last_ended = None - for test_id in sorted(test_series_obj.tests): + durations = [] + + for test_id in test_series_obj.tests: test_obj = test_series_obj.tests[test_id] started = test_obj.results['started'] ended = test_obj.results['finished'] - if last_ended is not None: - self.assertLessEqual(last_ended, started) - last_ended = ended + durations.append((started, ended)) + + self.assertFalse(durations_overlap(durations)) def test_series_modes(self): """Test if modes and host are applied correctly.""" diff --git a/test/tests/status_cmd_tests.py b/test/tests/status_cmd_tests.py index 2636c6b15..117e375d0 100644 --- a/test/tests/status_cmd_tests.py +++ b/test/tests/status_cmd_tests.py @@ -96,7 +96,7 @@ def test_status_command(self): series = TestSeries(self.pav_cfg, None) for test in tests: series._add_test('test', test) - test_str = " ".join([test.full_id for test in series.tests.values()]) + test_str = " ".join([str(test.id) for test in series.tests.values()]) status_cmd = commands.get_command('status') status_cmd.outfile = io.StringIO() @@ -105,7 +105,7 @@ def test_status_command(self): for test in series.tests.values(): parser = argparse.ArgumentParser() status_cmd._setup_arguments(parser) - arg_list = ['-j', test.full_id] + arg_list = ['-j', str(test.id)] args = parser.parse_args(arg_list) self.assertEqual(status_cmd.run(self.pav_cfg, args), 0) @@ -120,7 +120,7 @@ def test_status_command(self): for test in series.tests.values(): parser = argparse.ArgumentParser() status_cmd._setup_arguments(parser) - args = parser.parse_args([test.full_id]) + args = parser.parse_args([str(test.id)]) self.assertEqual(status_cmd.run(self.pav_cfg, args), 0) # Testing for multiple tests with tabular output @@ -187,10 +187,10 @@ def test_set_status_command(self): start_status = test.status.current() parser = argparse.ArgumentParser() set_status_cmd._setup_arguments(parser) - arg_list = ['-s', 'RUN_USER', '-n', 'tacos are delicious', test.full_id] + arg_list = ['-s', 'RUN_USER', '-n', 'tacos are delicious', str(test.id)] args = parser.parse_args(arg_list) self.assertEqual(set_status_cmd.run(self.pav_cfg, args), 0, - "Invalid run return for test {}".format(test.full_id)) + "Invalid run return for test {}".format(test.id)) end_status = test.status.current() self.assertNotEqual(end_status.state, start_status.state) @@ -284,7 +284,7 @@ def test_status_history(self): while not test.complete and time.time() < end: time.sleep(.1) - args = parser.parse_args(['--history', 
diff --git a/test/tests/status_cmd_tests.py b/test/tests/status_cmd_tests.py
index 2636c6b15..117e375d0 100644
--- a/test/tests/status_cmd_tests.py
+++ b/test/tests/status_cmd_tests.py
@@ -96,7 +96,7 @@ def test_status_command(self):
         series = TestSeries(self.pav_cfg, None)
         for test in tests:
             series._add_test('test', test)
-        test_str = " ".join([test.full_id for test in series.tests.values()])
+        test_str = " ".join([str(test.id) for test in series.tests.values()])

         status_cmd = commands.get_command('status')
         status_cmd.outfile = io.StringIO()
@@ -105,7 +105,7 @@ def test_status_command(self):

         for test in series.tests.values():
             parser = argparse.ArgumentParser()
             status_cmd._setup_arguments(parser)
-            arg_list = ['-j', test.full_id]
+            arg_list = ['-j', str(test.id)]
             args = parser.parse_args(arg_list)
             self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)
@@ -120,7 +120,7 @@ def test_status_command(self):
         for test in series.tests.values():
             parser = argparse.ArgumentParser()
             status_cmd._setup_arguments(parser)
-            args = parser.parse_args([test.full_id])
+            args = parser.parse_args([str(test.id)])
             self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)

         # Testing for multiple tests with tabular output
@@ -187,10 +187,10 @@ def test_set_status_command(self):
         start_status = test.status.current()
         parser = argparse.ArgumentParser()
         set_status_cmd._setup_arguments(parser)
-        arg_list = ['-s', 'RUN_USER', '-n', 'tacos are delicious', test.full_id]
+        arg_list = ['-s', 'RUN_USER', '-n', 'tacos are delicious', str(test.id)]
         args = parser.parse_args(arg_list)
         self.assertEqual(set_status_cmd.run(self.pav_cfg, args), 0,
-                         "Invalid run return for test {}".format(test.full_id))
+                         "Invalid run return for test {}".format(test.id))

         end_status = test.status.current()
         self.assertNotEqual(end_status.state, start_status.state)
@@ -284,7 +284,7 @@ def test_status_history(self):
         while not test.complete and time.time() < end:
             time.sleep(.1)

-        args = parser.parse_args(['--history', 'test.{}'.format(test.id)])
+        args = parser.parse_args(['--history', str(test.id)])
         self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)

         out.seek(0)
diff --git a/test/tests/sys_series_tracker_tests.py b/test/tests/sys_series_tracker_tests.py
index b903e29ff..049ded300 100644
--- a/test/tests/sys_series_tracker_tests.py
+++ b/test/tests/sys_series_tracker_tests.py
@@ -37,4 +37,4 @@ def test_sys_name_tracker(self):
         with json_file.open() as json_series_file:
             data = json.load(json_series_file)

-        self.assertEqual(data[sys_name], series.sid)
+        self.assertEqual(data[sys_name], str(series.id))
diff --git a/test/tests/test_ids_tests.py b/test/tests/test_ids_tests.py
index 70ef60365..bf64a77a1 100644
--- a/test/tests/test_ids_tests.py
+++ b/test/tests/test_ids_tests.py
@@ -1,3 +1,5 @@
+import uuid
+
 from pavilion.unittest import PavTestCase
 from pavilion.test_ids import TestID, SeriesID, GroupID, TestRange, SeriesRange

@@ -7,8 +9,8 @@ class TestIDTests(PavTestCase):
     def test_test_id_validation(self):
         """Test that validation is correctly performed for test IDs."""

-        valid_ids = ("1", "test.1", "37")
-        invalid_ids = ("", "0", "test.0", "-3", "all" "last", "")
+        valid_ids = ("1", "s2.1", "37", uuid.uuid4().hex)
+        invalid_ids = ("", "test.0", "-3", "all" "last", "")

         for id in valid_ids:
             self.assertTrue(TestID.is_valid_id(id))
@@ -31,8 +33,8 @@ def test_series_id_validation(self):
     def test_group_id_validation(self):
         """Test that validation is correctly performed for group IDs."""

-        valid_ids = ("mygroup")
-        invalid_ids = ("1", "s7", "all", "last", "test.1", "")
+        valid_ids = ("mygroup",)
+        invalid_ids = ("1", "s7", "all", "last", "s2.1", "123abc", "-as3", "a b")

         for id in valid_ids:
             self.assertTrue(GroupID.is_valid_id(id))
@@ -43,17 +45,23 @@ def test_test_range_expansion(self):
         """Test that test ID ranges are correctly expanded into sequences of test IDs."""

-        ranges = ("1-2", "1-1", "2-1")
+        ranges = ("1-2", "1-1")
         expected = ([TestID("1"), TestID("2")], [TestID("1")], [])

         for i, rng in enumerate(ranges):
             self.assertEqual(TestRange.from_str(rng).expand(), expected[i])

+        with self.assertRaises(ValueError):
+            TestRange.from_str("2-1")
+
     def test_series_range_expansion(self):
         """Test that series ID ranges are correctly expanded into sequences of series IDs."""

-        ranges = ("s1-s2", "s1-s1", "s2-s1")
+        ranges = ("s1-s2", "s1-s1")
         expected = ([SeriesID("s1"), SeriesID("s2")], [SeriesID("s1")], [])

         for i, rng in enumerate(ranges):
             self.assertEqual(SeriesRange.from_str(rng).expand(), expected[i])
+
+        with self.assertRaises(ValueError):
+            SeriesRange.from_str("s2-s1")
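
Editor's note (not part of the patch): the range-expansion tests above change how
reversed ranges are handled; "2-1" and "s2-s1" previously expanded to an empty list,
and the updated tests expect from_str() itself to raise ValueError. A short sketch
mirroring the assertions above:

    from pavilion.test_ids import TestID, TestRange

    TestRange.from_str("1-2").expand()   # [TestID("1"), TestID("2")]
    TestRange.from_str("2-1")            # now expected to raise ValueError
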
diff --git a/test/tests/testset_tests.py b/test/tests/testset_tests.py
index cdc129326..3e2f831c4 100644
--- a/test/tests/testset_tests.py
+++ b/test/tests/testset_tests.py
@@ -231,7 +231,7 @@ def test_cancel(self):
         for test in ts1.tests:
             self.assertEqual(test.status.current().state, test.status.states.CANCELLED,
-                             msg="Test {} should be aborted".format(test.full_id))
+                             msg="Test {} should be aborted".format(test.id))

         self.assertFalse(ts1.all_passed)

     def test_should_run(self):
diff --git a/test/tests/wait_cmd_tests.py b/test/tests/wait_cmd_tests.py
index f2b8670fc..98807ecb6 100644
--- a/test/tests/wait_cmd_tests.py
+++ b/test/tests/wait_cmd_tests.py
@@ -26,7 +26,7 @@ def test_wait_command(self):
         series = TestSeries(self.pav_cfg, None)
         for test in tests:
             series._add_test('test_set', test)
-        test_str = " ".join([test.full_id for test in series.tests.values()])
+        test_str = " ".join([str(test.id) for test in series.tests.values()])

         wait_cmd = commands.get_command('wait')
         wait_cmd.outfile = io.StringIO()
@@ -35,7 +35,7 @@ def test_wait_command(self):

         for test in series.tests.values():
             parser = argparse.ArgumentParser()
             wait_cmd._setup_arguments(parser)
-            arg_list = ['-t', '1', test.full_id]
+            arg_list = ['-t', '1', str(test.id)]
             args = parser.parse_args(arg_list)
             self.assertEqual(wait_cmd.run(self.pav_cfg, args), 0)
@@ -50,7 +50,7 @@ def test_wait_command(self):

         for test in series.tests.values():
             parser = argparse.ArgumentParser()
             wait_cmd._setup_arguments(parser)
-            arg_list = ['-t', '1', test.full_id]
+            arg_list = ['-t', '1', str(test.id)]
             args = parser.parse_args(arg_list)
             self.assertEqual(wait_cmd.run(self.pav_cfg, args), 0)