From 554f5d8d981492e3f77329f9086adcfa188f5774 Mon Sep 17 00:00:00 2001
From: fcosta_oliveira
Date: Fri, 20 Sep 2024 12:37:29 +0100
Subject: [PATCH] Bumping version from 0.1.240 to 0.1.241

---
 pyproject.toml                       |  2 +-
 utils/generate_latency_benchmarks.py | 62 +++++++++++++++++++---------
 2 files changed, 43 insertions(+), 21 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 8805bd0..3abf6a7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "redis-benchmarks-specification"
-version = "0.1.240"
+version = "0.1.241"
 description = "The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute."
 authors = ["filipecosta90 ","Redis Performance Group "]
 readme = "Readme.md"
diff --git a/utils/generate_latency_benchmarks.py b/utils/generate_latency_benchmarks.py
index e0531a8..6886982 100644
--- a/utils/generate_latency_benchmarks.py
+++ b/utils/generate_latency_benchmarks.py
@@ -7,6 +7,7 @@
 import ruamel.yaml
 from ruamel.yaml.scalarstring import DoubleQuotedScalarString
 
+
 def calculate_rate_limit(p50_value):
     if p50_value < 1000:
         return 100
@@ -15,7 +16,10 @@ def calculate_rate_limit(p50_value):
     else:
         return 10000
 
-def create_new_test_config(original_config_path, new_config_path, test_name, new_test_name, p50_value):
+
+def create_new_test_config(
+    original_config_path, new_config_path, test_name, new_test_name, p50_value
+):
     # Check if the original configuration file exists
     if not os.path.exists(original_config_path):
         return False  # Indicate failure
@@ -23,7 +27,7 @@ def create_new_test_config(original_config_path, new_config_path, test_name, new
     # Load the original test configuration with ruamel.yaml
     yaml = ruamel.yaml.YAML()
     yaml.preserve_quotes = True  # Preserve quotes in scalar values
-    with open(original_config_path, 'r') as file:
+    with open(original_config_path, "r") as file:
         config = yaml.load(file)
 
     # Calculate the total desired rate limit
@@ -31,7 +35,7 @@ def create_new_test_config(original_config_path, new_config_path, test_name, new
 
     # Calculate per-connection rate limit
     # Extract the original arguments
-    original_arguments = config['clientconfig']['arguments']
+    original_arguments = config["clientconfig"]["arguments"]
 
     # Convert to string if necessary
     if not isinstance(original_arguments, str):
@@ -46,11 +50,15 @@ def create_new_test_config(original_config_path, new_config_path, test_name, new
     clients_per_thread = 50  # Default value
     threads = 4  # Default value
 
-    clients_match = re.search(r'(?:-c|--clients)(?:[=\s]+)(\d+)', original_arguments_str)
+    clients_match = re.search(
+        r"(?:-c|--clients)(?:[=\s]+)(\d+)", original_arguments_str
+    )
     if clients_match:
         clients_per_thread = int(clients_match.group(1))
 
-    threads_match = re.search(r'(?:-t|--threads)(?:[=\s]+)(\d+)', original_arguments_str)
+    threads_match = re.search(
+        r"(?:-t|--threads)(?:[=\s]+)(\d+)", original_arguments_str
+    )
     if threads_match:
         threads = int(threads_match.group(1))
 
@@ -61,28 +69,35 @@ def create_new_test_config(original_config_path, new_config_path, test_name, new
     per_connection_rate_limit = max(1, int(total_rate_limit / total_connections))
 
     # Remove existing rate limit arguments using regex
-    new_arguments = re.sub(r'--rate(?:-limit(?:ing)?)?(?:\s+\S+)?', '', original_arguments_str)
+    new_arguments = re.sub(
+        r"--rate(?:-limit(?:ing)?)?(?:\s+\S+)?", "", original_arguments_str
+    )
 
     # Append the new '--rate-limiting' argument and its value
-    new_arguments = f'{new_arguments.strip()} --rate-limiting {per_connection_rate_limit}'
+    new_arguments = (
+        f"{new_arguments.strip()} --rate-limiting {per_connection_rate_limit}"
+    )
 
     # Update the test name to reflect the new test
-    config['name'] = new_test_name
-    config['description'] += f" Rate limited to {total_rate_limit} ops/sec."
+    config["name"] = new_test_name
+    config["description"] += f" Rate limited to {total_rate_limit} ops/sec."
 
     # Update the arguments in the config
-    config['clientconfig']['arguments'] = DoubleQuotedScalarString(new_arguments)
+    config["clientconfig"]["arguments"] = DoubleQuotedScalarString(new_arguments)
 
     # Ensure the destination directory exists
     os.makedirs(os.path.dirname(new_config_path), exist_ok=True)
 
     # Save the new test configuration
-    with open(new_config_path, 'w') as file:
+    with open(new_config_path, "w") as file:
         yaml.dump(config, file)
 
-    print(f"Created new test configuration for '{test_name}' with total rate limit {total_rate_limit} ops/sec and per-connection rate limit {per_connection_rate_limit} ops/sec.")
+    print(
+        f"Created new test configuration for '{test_name}' with total rate limit {total_rate_limit} ops/sec and per-connection rate limit {per_connection_rate_limit} ops/sec."
+    )
     return True  # Indicate success
 
+
 def main():
     parser = argparse.ArgumentParser(
         description="Create latency benchmarks",
@@ -141,40 +156,47 @@ def main():
             # Execute the TS.REVRANGE command
             # "-" and "+" denote the minimal and maximal timestamps
             result = rts.execute_command("TS.REVRANGE", ts_key, "-", "+")
-            
+
             # Check if result is not empty
             if result:
                 # Extract values and convert to floats
                 values = [float(value) for timestamp, value in result]
                 # Compute the median (p50)
                 p50_value = np.median(values)
-                
+
                 # Output the results
                 print(f"Results for test case '{test_name}': p50 rate = {p50_value}")
                 rate = calculate_rate_limit(p50_value)
 
-                original_config_path = f'../redis_benchmarks_specification/test-suites/{test_name}.yml'  # Original test config file
-                new_test_name = f'latency-rate-limited-{rate}_qps-{test_name}'
-                new_config_path = f'../redis_benchmarks_specification/test-suites/{new_test_name}.yaml'  # New test config file
-                success = create_new_test_config(original_config_path, new_config_path, test_name, new_test_name, p50_value)
+                original_config_path = f"../redis_benchmarks_specification/test-suites/{test_name}.yml"  # Original test config file
+                new_test_name = f"latency-rate-limited-{rate}_qps-{test_name}"
+                new_config_path = f"../redis_benchmarks_specification/test-suites/{new_test_name}.yaml"  # New test config file
+                success = create_new_test_config(
+                    original_config_path,
+                    new_config_path,
+                    test_name,
+                    new_test_name,
+                    p50_value,
+                )
                 if not success:
                     failed_files.append(test_name)
             else:
                 print(f"No data available for test case '{test_name}'.")
                 failed_files.append(test_name)
-            
+
         except redis.exceptions.ResponseError as e:
             print(f"Error retrieving data for test case '{test_name}': {e}")
             failed_files.append(test_name)
         except Exception as e:
             print(f"An error occurred while processing test case '{test_name}': {e}")
             failed_files.append(test_name)
-    
+
     # At the end, print out the list of failed files if any
     if failed_files:
         print("\nThe following test cases had missing configuration files or errors:")
        for test_name in failed_files:
            print(f"- {test_name}")
 
+
 if __name__ == "__main__":
    main()