From 7fdb3870a451a1cbc7f29d77177a14de58709656 Mon Sep 17 00:00:00 2001 From: filipecosta90 Date: Sun, 22 Oct 2023 23:51:15 +0100 Subject: [PATCH] Added support for spot instance setups --- pyproject.toml | 2 +- redisbench_admin/compare/args.py | 1 + redisbench_admin/compare/compare.py | 236 ++++++++++-------- redisbench_admin/export/export.py | 1 + redisbench_admin/run_async/async_env.py | 2 +- .../{terraform.py => async_terraform.py} | 0 redisbench_admin/run_async/run_async.py | 2 +- redisbench_admin/run_remote/remote_env.py | 106 ++++++-- redisbench_admin/run_remote/run_remote.py | 13 +- redisbench_admin/utils/benchmark_config.py | 29 ++- redisbench_admin/utils/remote.py | 20 +- tests/test_aa_run_remote.py | 10 +- tests/test_common.py | 9 +- .../benchmark_definitions/defaults.yml | 2 +- .../common-properties-enterprise.yml | 23 ++ tests/test_export.py | 40 +++ tests/test_remote.py | 5 +- tests/test_remote_env.py | 6 + 18 files changed, 362 insertions(+), 145 deletions(-) rename redisbench_admin/run_async/{terraform.py => async_terraform.py} (100%) create mode 100644 tests/test_data/common-properties-enterprise.yml diff --git a/pyproject.toml b/pyproject.toml index ca69bed..e9a1abc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "redisbench-admin" -version = "0.10.16" +version = "0.10.17" description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )." authors = ["filipecosta90 ","Redis Performance Group "] readme = "README.md" diff --git a/redisbench_admin/compare/args.py b/redisbench_admin/compare/args.py index a76da6d..2fd35c5 100644 --- a/redisbench_admin/compare/args.py +++ b/redisbench_admin/compare/args.py @@ -96,6 +96,7 @@ def create_compare_arguments(parser): parser.add_argument("--comparison-tag", type=str, default=None, required=False) parser.add_argument("--print-regressions-only", type=bool, default=False) parser.add_argument("--print-improvements-only", type=bool, default=False) + parser.add_argument("--skip-unstable", type=bool, default=False) parser.add_argument("--verbose", type=bool, default=False) parser.add_argument("--simple-table", type=bool, default=False) parser.add_argument("--use_metric_context_path", type=bool, default=False) diff --git a/redisbench_admin/compare/compare.py b/redisbench_admin/compare/compare.py index b56de41..c292c44 100644 --- a/redisbench_admin/compare/compare.py +++ b/redisbench_admin/compare/compare.py @@ -124,6 +124,7 @@ def compare_command_logic(args, project_name, project_version): simplify_table = args.simple_table print_regressions_only = args.print_regressions_only print_improvements_only = args.print_improvements_only + skip_unstable = args.skip_unstable baseline_tag = args.baseline_tag comparison_tag = args.comparison_tag last_n_baseline = args.last_n @@ -258,6 +259,7 @@ def compare_command_logic(args, project_name, project_version): comparison_deployment_name, print_improvements_only, print_regressions_only, + skip_unstable, regressions_percent_lower_limit, simplify_table, test, @@ -348,7 +350,12 @@ def compare_command_logic(args, project_name, project_version): user_input = "n" html_url = "n/a" regression_count = len(detected_regressions) - baseline_str, by_str, comparison_str = get_by_strings( + ( + baseline_str, + by_str_baseline, + comparison_str, + by_str_comparison, + ) = get_by_strings( baseline_branch, comparison_branch, baseline_tag, @@ -476,6 +483,7 @@ def 
compute_regression_table( comparison_deployment_name="oss-standalone", print_improvements_only=False, print_regressions_only=False, + skip_unstable=False, regressions_percent_lower_limit=5.0, simplify_table=False, test="", @@ -508,7 +516,7 @@ def compute_regression_table( logging.info( "Using a time-delta from {} to {}".format(from_human_str, to_human_str) ) - baseline_str, by_str, comparison_str = get_by_strings( + baseline_str, by_str_baseline, comparison_str, by_str_comparison = get_by_strings( baseline_branch, comparison_branch, baseline_tag, @@ -557,7 +565,8 @@ def compute_regression_table( comparison_deployment_name, baseline_str, comparison_str, - by_str, + by_str_baseline, + by_str_comparison, from_ts_ms, to_ts_ms, last_n_baseline, @@ -566,6 +575,7 @@ def compute_regression_table( metric_name, print_improvements_only, print_regressions_only, + skip_unstable, regressions_percent_lower_limit, rts, simplify_table, @@ -627,36 +637,54 @@ def get_by_strings( baseline_tag, comparison_tag, ): - use_tag = False - use_branch = False - by_str = "" + baseline_covered = False + comparison_covered = False + by_str_baseline = "" + by_str_comparison = "" baseline_str = "" comparison_str = "" - if (baseline_branch is not None) and comparison_branch is not None: - use_branch = True - by_str = "branch" + if baseline_branch is not None: + baseline_covered = True + by_str_baseline = "branch" baseline_str = baseline_branch + if comparison_branch is not None: + comparison_covered = True + by_str_comparison = "branch" comparison_str = comparison_branch - if baseline_tag is not None and comparison_tag is not None: - use_tag = True - by_str = "version" + if baseline_tag is not None: + if baseline_covered: + logging.error( + "--baseline-branch and --baseline-tag are mutually exclusive. Pick one..." + ) + exit(1) + baseline_covered = True + by_str_baseline = "version" baseline_str = baseline_tag + + if comparison_tag is not None: + # check if we had already covered comparison + if comparison_covered: + logging.error( + "--comparison-branch and --comparison-tag are mutually exclusive. Pick one..." 
+ ) + exit(1) + comparison_covered = True + by_str_comparison = "version" comparison_str = comparison_tag - if use_branch is False and use_tag is False: + + if baseline_covered is False: logging.error( - "You need to provider either " - + "( --baseline-branch and --comparison-branch ) " - + "or ( --baseline-tag and --comparison-tag ) args" + "You need to provide either " + "( --baseline-branch or --baseline-tag ) " ) exit(1) - if use_branch is True and use_tag is True: + if comparison_covered is False: logging.error( - +"( --baseline-branch and --comparison-branch ) " - + "and ( --baseline-tag and --comparison-tag ) args are mutually exclusive" + "You need to provide either " + + "( --comparison-branch or --comparison-tag ) " ) exit(1) - return baseline_str, by_str, comparison_str + return baseline_str, by_str_baseline, comparison_str, by_str_comparison def from_rts_to_regression_table( @@ -664,7 +692,8 @@ def from_rts_to_regression_table( comparison_deployment_name, baseline_str, comparison_str, - by_str, + by_str_baseline, + by_str_comparison, from_ts_ms, to_ts_ms, last_n_baseline, @@ -673,6 +702,7 @@ def from_rts_to_regression_table( metric_name, print_improvements_only, print_regressions_only, + skip_unstable, regressions_percent_lower_limit, rts, simplify_table, @@ -693,8 +723,11 @@ def from_rts_to_regression_table( noise_waterline = 3 progress = tqdm(unit="benchmark time-series", total=len(test_names)) for test_name in test_names: + multi_value_baseline = check_multi_value_filter(baseline_str) + multi_value_comparison = check_multi_value_filter(comparison_str) + filters_baseline = [ - "{}={}".format(by_str, baseline_str), + "{}={}".format(by_str_baseline, baseline_str), "metric={}".format(metric_name), "{}={}".format(test_filter, test_name), "deployment_name={}".format(baseline_deployment_name), @@ -703,7 +736,7 @@ def from_rts_to_regression_table( if running_platform is not None: filters_baseline.append("running_platform={}".format(running_platform)) filters_comparison = [ - "{}={}".format(by_str, comparison_str), + "{}={}".format(by_str_comparison, comparison_str), "metric={}".format(metric_name), "{}={}".format(test_filter, test_name), "deployment_name={}".format(comparison_deployment_name), @@ -729,18 +762,10 @@ def from_rts_to_regression_table( comparison_str, len(comparison_timeseries), test_name ) ) - if len(baseline_timeseries) > 1: - logging.warning( - "\t\tTime-series: {}".format(", ".join(baseline_timeseries)) - ) - logging.info("Checking if Totals will reduce timeseries.") - new_base = [] - for ts_name in baseline_timeseries: - if "Totals" in ts_name: - new_base.append(ts_name) - baseline_timeseries = new_base - - if len(baseline_timeseries) != 1: + if len(baseline_timeseries) > 1 and multi_value_baseline is False: + baseline_timeseries = get_only_Totals(baseline_timeseries) + + if len(baseline_timeseries) != 1 and multi_value_baseline is False: if verbose: logging.warning( "Skipping this test given the value of timeseries !=1. 
Baseline timeseries {}".format( @@ -751,34 +776,23 @@ def from_rts_to_regression_table( logging.warning( "\t\tTime-series: {}".format(", ".join(baseline_timeseries)) ) - continue - else: - ts_name_baseline = baseline_timeseries[0] - if len(comparison_timeseries) > 1: - logging.warning( - "\t\tTime-series: {}".format(", ".join(comparison_timeseries)) - ) - logging.info("Checking if Totals will reduce timeseries.") - new_base = [] - for ts_name in comparison_timeseries: - if "Totals" in ts_name: - new_base.append(ts_name) - comparison_timeseries = new_base - if len(comparison_timeseries) != 1: + if len(comparison_timeseries) > 1 and multi_value_comparison is False: + comparison_timeseries = get_only_Totals(comparison_timeseries) + if len(comparison_timeseries) != 1 and multi_value_comparison is False: if verbose: logging.warning( "Comparison timeseries {}".format(len(comparison_timeseries)) ) continue - else: - ts_name_comparison = comparison_timeseries[0] baseline_v = "N/A" comparison_v = "N/A" baseline_values = [] + baseline_datapoints = [] comparison_values = [] + comparison_datapoints = [] percentage_change = 0.0 baseline_v_str = "N/A" comparison_v_str = "N/A" @@ -788,9 +802,11 @@ def from_rts_to_regression_table( note = "" try: - baseline_datapoints = rts.ts().revrange( - ts_name_baseline, from_ts_ms, to_ts_ms - ) + for ts_name_baseline in baseline_timeseries: + datapoints_inner = rts.ts().revrange( + ts_name_baseline, from_ts_ms, to_ts_ms + ) + baseline_datapoints.extend(datapoints_inner) ( baseline_pct_change, baseline_v, @@ -804,10 +820,12 @@ def from_rts_to_regression_table( last_n_baseline, verbose, ) + for ts_name_comparison in comparison_timeseries: + datapoints_inner = rts.ts().revrange( + ts_name_comparison, from_ts_ms, to_ts_ms + ) + comparison_datapoints.extend(datapoints_inner) - comparison_datapoints = rts.ts().revrange( - ts_name_comparison, from_ts_ms, to_ts_ms - ) ( comparison_pct_change, comparison_v, @@ -834,28 +852,17 @@ def from_rts_to_regression_table( pass unstable = False if baseline_v != "N/A" and comparison_v != "N/A": - stamp_b = "" - unstable = False if comparison_pct_change > 10.0 or baseline_pct_change > 10.0: note = "UNSTABLE (very high variance)" unstable = True - if baseline_pct_change > 10.0: - stamp_b = "UNSTABLE" - if simplify_table: - baseline_v_str = " {:.0f}".format(baseline_v) - else: - baseline_v_str = " {:.0f} +- {:.1f}% {} ({} datapoints)".format( - baseline_v, baseline_pct_change, stamp_b, len(baseline_values) - ) - stamp_c = "" - if comparison_pct_change > 10.0: - stamp_c = "UNSTABLE" - if simplify_table: - comparison_v_str = " {:.0f}".format(comparison_v) - else: - comparison_v_str = " {:.0f} +- {:.1f}% {} ({} datapoints)".format( - comparison_v, comparison_pct_change, stamp_c, len(comparison_values) - ) + + baseline_v_str = prepare_value_str( + baseline_pct_change, baseline_v, baseline_values, simplify_table + ) + comparison_v_str = prepare_value_str( + comparison_pct_change, comparison_v, comparison_values, simplify_table + ) + if metric_mode == "higher-better": percentage_change = ( float(comparison_v) / float(baseline_v) - 1 @@ -875,9 +882,11 @@ def from_rts_to_regression_table( note = note + " REGRESSION" detected_regressions.append(test_name) elif percentage_change < -noise_waterline: - note = note + " potential REGRESSION" + if simplify_table is False: + note = note + " potential REGRESSION" else: - note = note + " -- no change --" + if simplify_table is False: + note = note + " No Change" if percentage_change > 0.0 and not unstable: 
if percentage_change > waterline: @@ -885,9 +894,11 @@ def from_rts_to_regression_table( total_improvements = total_improvements + 1 note = note + " IMPROVEMENT" elif percentage_change > noise_waterline: - note = note + " potential IMPROVEMENT" + if simplify_table is False: + note = note + " potential IMPROVEMENT" else: - note = note + " -- no change --" + if simplify_table is False: + note = note + " No Change" if ( detected_improvement is False @@ -906,6 +917,8 @@ def from_rts_to_regression_table( should_add_line = True if print_all: should_add_line = True + if unstable and skip_unstable: + should_add_line = False if should_add_line: total_comparison_points = total_comparison_points + 1 @@ -914,7 +927,6 @@ def from_rts_to_regression_table( comparison_v_str, note, percentage_change, - simplify_table, table, test_name, ) @@ -929,6 +941,39 @@ def from_rts_to_regression_table( ) +def get_only_Totals(baseline_timeseries): + logging.warning("\t\tTime-series: {}".format(", ".join(baseline_timeseries))) + logging.info("Checking if Totals will reduce timeseries.") + new_base = [] + for ts_name in baseline_timeseries: + if "Totals" in ts_name: + new_base.append(ts_name) + baseline_timeseries = new_base + return baseline_timeseries + + +def check_multi_value_filter(baseline_str): + multi_value_baseline = False + if "(" in baseline_str and "," in baseline_str and ")" in baseline_str: + multi_value_baseline = True + return multi_value_baseline + + +def prepare_value_str(baseline_pct_change, baseline_v, baseline_values, simplify_table): + baseline_v_str = " {:.0f}".format(baseline_v) + stamp_b = "" + if baseline_pct_change > 10.0: + stamp_b = "UNSTABLE " + if len(baseline_values) > 1: + baseline_v_str += " +- {:.1f}% {}".format( + baseline_pct_change, + stamp_b, + ) + if simplify_table is False and len(baseline_values) > 1: + baseline_v_str += "({} datapoints)".format(len(baseline_values)) + return baseline_v_str + + def get_test_names_from_db(rts, tags_regex_string, test_names, used_key): try: test_names = rts.smembers(used_key) @@ -962,30 +1007,19 @@ def add_line( comparison_v_str, note, percentage_change, - simplify_table, table, test_name, ): percentage_change_str = "{:.1f}% ".format(percentage_change) - if simplify_table: - table.append( - [ - test_name, - baseline_v_str, - comparison_v_str, - percentage_change_str, - ] - ) - else: - table.append( - [ - test_name, - baseline_v_str, - comparison_v_str, - percentage_change_str, - note.strip(), - ] - ) + table.append( + [ + test_name, + baseline_v_str, + comparison_v_str, + percentage_change_str, + note.strip(), + ] + ) def get_v_pct_change_and_largest_var( diff --git a/redisbench_admin/export/export.py b/redisbench_admin/export/export.py index d177255..7968068 100644 --- a/redisbench_admin/export/export.py +++ b/redisbench_admin/export/export.py @@ -64,6 +64,7 @@ def export_command_logic(args, project_name, project_version): exit(1) else: ( + _, _, metrics, exporter_timemetric_path, diff --git a/redisbench_admin/run_async/async_env.py b/redisbench_admin/run_async/async_env.py index f7d7bc2..701190b 100644 --- a/redisbench_admin/run_async/async_env.py +++ b/redisbench_admin/run_async/async_env.py @@ -6,7 +6,7 @@ import logging import tarfile -from redisbench_admin.run_async.terraform import ( +from redisbench_admin.run_async.async_terraform import ( retrieve_inventory_info, terraform_spin_or_reuse_env, ) diff --git a/redisbench_admin/run_async/terraform.py b/redisbench_admin/run_async/async_terraform.py similarity index 100% rename from 
redisbench_admin/run_async/terraform.py rename to redisbench_admin/run_async/async_terraform.py diff --git a/redisbench_admin/run_async/run_async.py b/redisbench_admin/run_async/run_async.py index ca9b3f5..bf94961 100644 --- a/redisbench_admin/run_async/run_async.py +++ b/redisbench_admin/run_async/run_async.py @@ -26,7 +26,7 @@ renderRunFile, savePemFile, ) -from redisbench_admin.run_async.terraform import ( +from redisbench_admin.run_async.async_terraform import ( TerraformClass, ) from redisbench_admin.utils.remote import ( diff --git a/redisbench_admin/run_remote/remote_env.py b/redisbench_admin/run_remote/remote_env.py index d5efd90..a3470a1 100644 --- a/redisbench_admin/run_remote/remote_env.py +++ b/redisbench_admin/run_remote/remote_env.py @@ -5,10 +5,13 @@ # import logging +from python_terraform import TerraformCommandError + from redisbench_admin.run_remote.terraform import ( retrieve_inventory_info, terraform_spin_or_reuse_env, ) +from redisbench_admin.utils.remote import check_remote_setup_spot_instance def remote_env_setup( @@ -27,6 +30,7 @@ def remote_env_setup( tf_timeout_secs=7200, tf_override_name=None, tf_folder_path=None, + spot_instance_error=False, ): server_plaintext_port = args.db_port db_ssh_port = args.db_ssh_port @@ -44,34 +48,83 @@ def remote_env_setup( "Missing one of the required keys for inventory usage. Exiting..." ) exit(1) - logging.info("Using the following connection addresses.") - logging.info("client_public_ip={}".format(client_public_ip)) - logging.info("server_public_ip={}".format(server_public_ip)) - logging.info("server_private_ip={}".format(server_private_ip)) + else: - ( - client_public_ip, - _, - _, - server_private_ip, - server_public_ip, - username, - ) = terraform_spin_or_reuse_env( - benchmark_config, - remote_envs, - repetition, - test_name, - tf_bin_path, - tf_github_actor, - tf_github_org, - tf_github_repo, - tf_github_sha, - tf_setup_name_sufix, - tf_triggering_env, - tf_timeout_secs, - tf_override_name, - tf_folder_path, + contains_spot, tf_folder_spot_path = check_remote_setup_spot_instance( + benchmark_config["remote"] ) + spot_available_and_used = False + if contains_spot: + logging.info(f"detected spot instance config in {tf_folder_spot_path}.") + if spot_instance_error is False: + logging.info( + f"Will deploy the detected spot instance config in {tf_folder_spot_path}." + ) + try: + ( + client_public_ip, + _, + _, + server_private_ip, + server_public_ip, + username, + ) = terraform_spin_or_reuse_env( + benchmark_config, + remote_envs, + repetition, + test_name, + tf_bin_path, + tf_github_actor, + tf_github_org, + tf_github_repo, + tf_github_sha, + tf_setup_name_sufix, + tf_triggering_env, + tf_timeout_secs, + tf_override_name, + tf_folder_spot_path, + ) + spot_available_and_used = True + except TerraformCommandError as error: + spot_instance_error = True + logging.error( + "Received the following error while trying to deploy the spot instance setup: {}.".format( + error.__str__() + ) + ) + pass + else: + logging.warning( + f"Even though there is a spot instance config, avoiding deploying it." 
+ ) + if spot_available_and_used is False: + ( + client_public_ip, + _, + _, + server_private_ip, + server_public_ip, + username, + ) = terraform_spin_or_reuse_env( + benchmark_config, + remote_envs, + repetition, + test_name, + tf_bin_path, + tf_github_actor, + tf_github_org, + tf_github_repo, + tf_github_sha, + tf_setup_name_sufix, + tf_triggering_env, + tf_timeout_secs, + tf_override_name, + tf_folder_path, + ) + logging.info("Using the following connection addresses.") + logging.info(f"client_public_ip={client_public_ip}") + logging.info(f"server_public_ip={server_public_ip}") + logging.info(f"server_private_ip={server_private_ip}") return ( client_public_ip, server_plaintext_port, @@ -80,4 +133,5 @@ def remote_env_setup( db_ssh_port, client_ssh_port, username, + spot_instance_error, ) diff --git a/redisbench_admin/run_remote/run_remote.py b/redisbench_admin/run_remote/run_remote.py index e203f84..5a8b4f0 100644 --- a/redisbench_admin/run_remote/run_remote.py +++ b/redisbench_admin/run_remote/run_remote.py @@ -304,6 +304,9 @@ def run_remote_command_logic(args, project_name, project_version): # contains the overall target-tables ( if any target is defined ) overall_tables = {} + # Used to only deploy spot once per run + spot_instance_error = False + for benchmark_type, bench_by_dataset_map in benchmark_runs_plan.items(): if return_code != 0 and args.fail_fast: logging.warning( @@ -414,6 +417,7 @@ def run_remote_command_logic(args, project_name, project_version): db_ssh_port, client_ssh_port, username, + spot_instance_error, ) = remote_env_setup( args, benchmark_config, @@ -430,6 +434,7 @@ def run_remote_command_logic(args, project_name, project_version): tf_timeout_secs, TF_OVERRIDE_NAME, TF_OVERRIDE_REMOTE, + spot_instance_error, ) # after we've created the env, even on error we should always teardown @@ -848,7 +853,9 @@ def run_remote_command_logic(args, project_name, project_version): {"metric-type": "latencystats"}, expire_ms, ) - except redis.exceptions.ConnectionError as e: + except ( + redis.exceptions.ConnectionError + ) as e: db_error_artifacts( db_ssh_port, dirname, @@ -1054,9 +1061,7 @@ def run_remote_command_logic(args, project_name, project_version): else: logging.info( - "Test {} does not have remote config. Skipping test.".format( - test_name - ) + f"Test {test_name} does not have remote config. Skipping test." 
) if len(benchmark_artifacts_links) > 0: diff --git a/redisbench_admin/utils/benchmark_config.py b/redisbench_admin/utils/benchmark_config.py index 67987ae..b9e5eb8 100644 --- a/redisbench_admin/utils/benchmark_config.py +++ b/redisbench_admin/utils/benchmark_config.py @@ -67,6 +67,7 @@ def prepare_benchmark_definitions(args): ( default_kpis, + default_remote, default_metrics, exporter_timemetric_path, default_specs, @@ -75,7 +76,7 @@ def prepare_benchmark_definitions(args): for usecase_filename in files: with open(usecase_filename, "r", encoding="utf8") as stream: test_result, benchmark_config, test_name = get_final_benchmark_config( - default_kpis, stream, usecase_filename + default_kpis, default_remote, stream, usecase_filename ) result &= test_result if test_result: @@ -109,6 +110,7 @@ def get_defaults(defaults_filename): default_metrics = [] exporter_timemetric_path = None default_kpis = None + default_remote = None default_specs = None cluster_config = None if os.path.exists(defaults_filename): @@ -118,12 +120,14 @@ def get_defaults(defaults_filename): ) ( default_kpis, + default_remote, default_metrics, exporter_timemetric_path, default_specs, cluster_config, ) = process_default_yaml_properties_file( default_kpis, + default_remote, default_metrics, defaults_filename, exporter_timemetric_path, @@ -131,6 +135,7 @@ def get_defaults(defaults_filename): ) return ( default_kpis, + default_remote, default_metrics, exporter_timemetric_path, default_specs, @@ -138,7 +143,7 @@ def get_defaults(defaults_filename): ) -def get_final_benchmark_config(default_kpis, stream, usecase_filename): +def get_final_benchmark_config(default_kpis, default_remote, stream, usecase_filename): result = False benchmark_config = None test_name = None @@ -150,6 +155,11 @@ def get_final_benchmark_config(default_kpis, stream, usecase_filename): merge_default_and_specific_properties_dict_type( benchmark_config, default_kpis, kpis_keyname, usecase_filename ) + kpis_keyname = "remote" + if default_remote is not None: + merge_default_and_specific_properties_dict_type( + benchmark_config, default_remote, kpis_keyname, usecase_filename + ) test_name = benchmark_config["name"] result = True except Exception as e: @@ -261,12 +271,24 @@ def extract_redis_dbconfig_parameters(benchmark_config, dbconfig_keyname): def process_default_yaml_properties_file( - default_kpis, default_metrics, defaults_filename, exporter_timemetric_path, stream + default_kpis, + default_remote, + default_metrics, + defaults_filename, + exporter_timemetric_path, + stream, ): default_config = yaml.safe_load(stream) default_specs = None cluster_config = None default_metrics, exporter_timemetric_path = extract_exporter_metrics(default_config) + if "remote" in default_config: + logging.info( + "Loading default REMOTE specifications from file: {}".format( + defaults_filename + ) + ) + default_remote = default_config["remote"] if "kpis" in default_config: logging.info( "Loading default KPIs specifications from file: {}".format( @@ -288,6 +310,7 @@ def process_default_yaml_properties_file( cluster_config = default_config["clusterconfig"] return ( default_kpis, + default_remote, default_metrics, exporter_timemetric_path, default_specs, diff --git a/redisbench_admin/utils/remote.py b/redisbench_admin/utils/remote.py index f84f8b4..fd818a6 100644 --- a/redisbench_admin/utils/remote.py +++ b/redisbench_admin/utils/remote.py @@ -300,6 +300,7 @@ def setup_remote_environment( "triggering_env": tf_triggering_env, "timeout_secs": tf_timeout_secs, }, + 
raise_on_error=True, ) return retrieve_tf_connection_vars(return_code, tf) @@ -548,13 +549,28 @@ def common_tf(branch, path, repo, temporary_dir=None, destroy=False): return terraform_working_dir +def check_remote_setup_spot_instance( + remote_setup_config, +): + logging.info(f"Checking for spot instance config within... {remote_setup_config}") + spot_path = None + contains_spot_instance = False + for remote_setup_property in remote_setup_config: + if "spot_instance" in remote_setup_property: + spot_path = "/terraform/" + remote_setup_property["spot_instance"] + contains_spot_instance = True + logging.info(f"Detected spot instance config. Setup path: {spot_path}") + + return contains_spot_instance, spot_path + + def fetch_remote_setup_from_config( remote_setup_config, repo="https://github.com/redis-performance/testing-infrastructure.git", branch="master", path=None, ): - setup_type = None + setup_type = "oss-standalone" setup = None if path is None: for remote_setup_property in remote_setup_config: @@ -562,6 +578,8 @@ def fetch_remote_setup_from_config( setup_type = remote_setup_property["type"] if "setup" in remote_setup_property: setup = remote_setup_property["setup"] + if "spot_instance" in remote_setup_property: + spot_path = "/terraform/" + remote_setup_property["spot_instance"] # fetch terraform folder path = "/terraform/{}-{}".format(setup_type, setup) terraform_working_dir = common_tf(branch, path, repo) diff --git a/tests/test_aa_run_remote.py b/tests/test_aa_run_remote.py index cbfbab8..b334bd4 100644 --- a/tests/test_aa_run_remote.py +++ b/tests/test_aa_run_remote.py @@ -50,11 +50,14 @@ def test_merge_default_and_config_metrics(): with open("./tests/test_data/common-properties-v0.1.yml", "r") as yml_file: ( default_kpis, + _, default_metrics, exporter_timemetric_path, default_specs, cluster_config, - ) = process_default_yaml_properties_file(None, None, "1.yml", None, yml_file) + ) = process_default_yaml_properties_file( + None, None, None, "1.yml", None, yml_file + ) assert exporter_timemetric_path == "$.StartTime" merged_exporter_timemetric_path, metrics = merge_default_and_config_metrics( {}, default_metrics, exporter_timemetric_path @@ -67,11 +70,14 @@ def test_merge_default_and_config_metrics(): with open("./tests/test_data/common-properties-v0.1.yml", "r") as yml_file: ( default_kpis, + _, default_metrics, exporter_timemetric_path, default_specs, cluster_config, - ) = process_default_yaml_properties_file(None, None, "1.yml", None, yml_file) + ) = process_default_yaml_properties_file( + None, None, None, "1.yml", None, yml_file + ) assert exporter_timemetric_path == "$.StartTime" assert default_specs == None with open( diff --git a/tests/test_common.py b/tests/test_common.py index 7cca968..739e93e 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -348,11 +348,14 @@ def test_process_default_yaml_properties_file(): with open("./tests/test_data/common-properties-v0.1.yml", "r") as yml_file: ( default_kpis, + _, default_metrics, exporter_timemetric_path, default_specs, cluster_config, - ) = process_default_yaml_properties_file(None, None, "1.yml", None, yml_file) + ) = process_default_yaml_properties_file( + None, None, None, "1.yml", None, yml_file + ) assert exporter_timemetric_path == "$.StartTime" assert default_kpis is None assert default_specs is None @@ -371,7 +374,7 @@ def test_extract_test_feasible_setups(): usecase_filename = "./tests/test_data/tsbs-devops-ingestion-scale100-4days-v2.yml" with open(usecase_filename, "r") as stream: _, benchmark_config, 
test_name = get_final_benchmark_config( - default_kpis, stream, usecase_filename + default_kpis, None, stream, usecase_filename ) assert cluster_config == { "init_commands": [ @@ -412,7 +415,7 @@ def test_extract_test_feasible_setups(): # wrong read res, benchmark_config, test_name = get_final_benchmark_config( - default_kpis, stream, "dont exist" + default_kpis, None, stream, "dont exist" ) assert res == False assert benchmark_config == None diff --git a/tests/test_data/benchmark_definitions/defaults.yml b/tests/test_data/benchmark_definitions/defaults.yml index a473a65..065ec5e 100644 --- a/tests/test_data/benchmark_definitions/defaults.yml +++ b/tests/test_data/benchmark_definitions/defaults.yml @@ -20,4 +20,4 @@ exporter: metrics: - "$.OverallQueryRates.Total" mode: higher-better - baseline-branch: master + higher-better: master diff --git a/tests/test_data/common-properties-enterprise.yml b/tests/test_data/common-properties-enterprise.yml new file mode 100644 index 0000000..0212a43 --- /dev/null +++ b/tests/test_data/common-properties-enterprise.yml @@ -0,0 +1,23 @@ +version: 0.5 +exporter: + redistimeseries: + test_name: "$.test_name" + version: "$.environment.version" + build: "$.environment.version" + branch: "$.environment.branch" + hash: "$.environment.hash" + timemetric: "$.test_metrics.memtier.'ALL STATS'.Runtime.'Start time'" + metrics: + - "$.test_metrics.memtier.'ALL STATS'.Totals.'Percentile Latencies'.'p99.00'" + - "$.test_metrics.memtier.'ALL STATS'.Totals.'Percentile Latencies'.'p50.00'" + - "$.test_metrics.memtier.'ALL STATS'.Totals.'Percentile Latencies'.'p99.00'" + - "$.test_metrics.memtier.'ALL STATS'.Totals.'Ops/sec'" + - "$.test_metrics.memtier.'ALL STATS'.Totals.'Average Latency'" + - "$.test_metrics.memtier.'ALL STATS'.Totals.'Min Latency'" + - "$.test_metrics.memtier.'ALL STATS'.Totals.'Max Latency'" + comparison: + metrics: + - "Ops/sec" + mode: higher-better + baseline-branch: master + baseline-version: latest diff --git a/tests/test_export.py b/tests/test_export.py index 534bed6..b42ce9f 100644 --- a/tests/test_export.py +++ b/tests/test_export.py @@ -73,6 +73,46 @@ def test_export_command_logic(): assert e.code == 0 +def test_export_command_logic_enterprise(): + rts_host = os.getenv("RTS_DATASINK_HOST", None) + rts_port = 16379 + rts_pass = "" + if rts_host is None: + return + rts = redis.Redis(port=16379, host=rts_host) + rts.ping() + rts.flushall() + parser = argparse.ArgumentParser( + description="test", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser = create_export_arguments(parser) + args = parser.parse_args( + args=[ + "--benchmark-result-file", + "./tests/test_data/result_report_1690282709.835491.json", + "--exporter-spec-file", + "./tests/test_data/common-properties-enterprise.yml", + "--redistimeseries_host", + rts_host, + "--redistimeseries_port", + "{}".format(rts_port), + "--redistimeseries_pass", + "{}".format(rts_pass), + "--deployment-type", + "enterprise", + "--github_repo", + "redis", + "--github_org", + "redis", + ] + ) + try: + export_command_logic(args, "tool", "v0") + except SystemExit as e: + assert e.code == 0 + + def test_export_opereto_csv_to_timeseries_dict(): break_by_dict = {"branch": "master", "version": "6.2.0"} benchmark_file = "./tests/test_data/2021-10-01.120753test_1sh_1wk_dual_ep_mixed_2thr_50conns_persistent_mtls_msetmget_kb_1p_csv_string.csv" diff --git a/tests/test_remote.py b/tests/test_remote.py index bcac11d..680cba2 100644 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -126,11 +126,14 @@ def 
test_extract_perversion_timeseries_from_results(): with open("./tests/test_data/common-properties-v0.1.yml", "r") as yml_file: ( default_kpis, + default_remote, default_metrics, exporter_timemetric_path, default_specs, cluster_config, - ) = process_default_yaml_properties_file(None, None, "1.yml", None, yml_file) + ) = process_default_yaml_properties_file( + None, None, None, "1.yml", None, yml_file + ) assert exporter_timemetric_path == "$.StartTime" assert default_specs == None with open( diff --git a/tests/test_remote_env.py b/tests/test_remote_env.py index 39c1fb9..7acc977 100644 --- a/tests/test_remote_env.py +++ b/tests/test_remote_env.py @@ -41,6 +41,7 @@ def test_remote_env_setup(): db_ssh_port, client_ssh_port, username, + spot_instance_error, ) = remote_env_setup( args, benchmark_config, @@ -54,11 +55,16 @@ def test_remote_env_setup(): tf_github_sha, tf_setup_name_sufix, tf_triggering_env, + 7200, + None, + None, + False, ) assert client_public_ip == "2.2.2.2" assert server_private_ip == "10.0.0.1" assert server_public_ip == "1.1.1.1" + assert spot_instance_error == False # using inventory but missing one manadatory key args = parser.parse_args(