From fb615f68304f8326036eb08cf92a0b993ffb5e28 Mon Sep 17 00:00:00 2001 From: Wojciech Zyla Date: Tue, 25 Jul 2023 14:24:10 +0200 Subject: [PATCH] fix: save mongo configuration to values.yaml fix: change directory for temp files fix: add test for edited values.yaml file fix: typos and description of yaml_escape_list function --- backend/SC4SNMP_UI_backend/__init__.py | 7 + .../apply_changes/config_to_yaml_utils.py | 80 +++++- .../apply_changes/handling_chain.py | 31 ++- ...ions.py => test_backend_ui_conversions.py} | 0 .../post_endpoints/test_post_apply_changes.py | 254 ++++++++++++++---- .../reference_files/poller_inventory.yaml | 4 + .../reference_files/scheduler_groups.yaml | 12 + .../reference_files/scheduler_profiles.yaml | 51 ++++ .../reference_files/values.yaml | 161 +++++++++++ .../yamls_for_tests/values_test/.gitignore | 1 + .../values_test/values-before-edit.yaml | 139 ++++++++++ .../yamls_for_tests/values_test/values.yaml | 139 ++++++++++ 12 files changed, 800 insertions(+), 79 deletions(-) rename backend/tests/common/{test_conversions.py => test_backend_ui_conversions.py} (100%) create mode 100644 backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml create mode 100644 backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml create mode 100644 backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml create mode 100644 backend/tests/yamls_for_tests/reference_files/values.yaml create mode 100644 backend/tests/yamls_for_tests/values_test/.gitignore create mode 100644 backend/tests/yamls_for_tests/values_test/values-before-edit.yaml create mode 100644 backend/tests/yamls_for_tests/values_test/values.yaml diff --git a/backend/SC4SNMP_UI_backend/__init__.py b/backend/SC4SNMP_UI_backend/__init__.py index 4780086..3276693 100644 --- a/backend/SC4SNMP_UI_backend/__init__.py +++ b/backend/SC4SNMP_UI_backend/__init__.py @@ -14,9 +14,16 @@ mongo_client = MongoClient(MONGO_URI) CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", 
"amqp://guest:guest@localhost:5672//") REDIS_URL = os.getenv("REDIS_URL") +VALUES_DIRECTORY = os.getenv("VALUES_DIRECTORY", "") +KEEP_TEMP_FILES = os.getenv("KEEP_TEMP_FILES", "false") +class NoValuesDirectoryException(Exception): + pass def create_app(): + if len(VALUES_DIRECTORY) == 0: + raise NoValuesDirectoryException + app = Flask(__name__) app.config.from_mapping( diff --git a/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py b/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py index 7c188ee..2ed1900 100644 --- a/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py +++ b/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py @@ -16,8 +16,21 @@ def bool_to_str(value): class MongoToYamlDictConversion: + """ + MongoToYamlDictConversion is an abstract class. Implementations of this class convert + appropriate mongo collections to dictionaries in such a way that configurations from those collections can be + dumped to yaml file with appropriate formatting. 
+ """ @classmethod def yaml_escape_list(cls, *l): + """ + This function is used to parse an example list [yaml_escape_list(el1, el2, el3)] like this: + - [el1, el2, el3] + and not like this: + - el1 + - el2 + - el3 + """ ret = ruamel.yaml.comments.CommentedSeq(l) ret.fa.set_flow_style() return ret @@ -28,6 +41,12 @@ def convert(self, documents: list) -> dict: class ProfilesToYamlDictConversion(MongoToYamlDictConversion): def convert(self, documents: list) -> dict: + """ + ProfilesToYamlDictConversion converts profiles from mongo collection to + format that can be dumped to yaml file + :param documents: list of profiles from mongo + :return: dictionary that can be dumped to yaml + """ result = {} for profile in documents: profile_name = get_group_or_profile_name_from_backend(profile) @@ -79,6 +98,12 @@ def convert(self, documents: list) -> dict: class GroupsToYamlDictConversion(MongoToYamlDictConversion): def convert(self, documents: list) -> dict: + """ + GroupsToYamlDictConversion converts groups from mongo collection to + format that can be dumped to yaml file + :param documents: list of groups from mongo + :return: dictionary that can be dumped to yaml + """ result = {} for group in documents: group_name = get_group_or_profile_name_from_backend(group) @@ -99,6 +124,12 @@ def convert(self, documents: list) -> dict: class InventoryToYamlDictConversion(MongoToYamlDictConversion): def convert(self, documents: list) -> dict: + """ + InventoryToYamlDictConversion converts inventory from mongo collection to + format that can be dumped to yaml file + :param documents: inventory from mongo + :return: dictionary that can be dumped to yaml + """ inventory_string = "address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete" for inv in documents: smart_profiles = bool_to_str(inv['smart_profiles']) @@ -112,6 +143,12 @@ def convert(self, documents: list) -> dict: class TempFileHandling: + """ + After converting configurations from mongo 
to dictionaries ready to be dumped to yaml file, those dictionaries + must be dumped to temporary files. This is because those configurations must be parsed before they are inserted + to values.yaml file. TempFileHandling is an abstract class whose implementations parse dictionaries and return + ready configuration that can be saved in values.yaml + """ def __init__(self, file_path: str): self._file_path = file_path @@ -124,7 +161,7 @@ def _delete_temp(self): if os.path.exists(self._file_path): os.remove(self._file_path) else: - current_app.logger.info(f"Pod directory {self._file_path} doesn't exist. File wasn't removed.") + current_app.logger.info(f"Directory {self._file_path} doesn't exist inside a Pod. File wasn't removed.") @abstractmethod def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): @@ -136,45 +173,60 @@ def __init__(self, file_path: str): super().__init__(file_path) def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with profiles configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ self._save_temp(document) lines = "" with open(self._file_path, "r") as file: line = file.readline() while line != "": - lines += f"{line}" + lines += line line = file.readline() if delete_tmp: self._delete_temp() return literal_string(lines) -class GroupsTempHandling(TempFileHandling): +class InventoryTempHandling(TempFileHandling): def __init__(self, file_path: str): super().__init__(file_path) def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with inventory configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ self._save_temp(document) - lines = "" + yaml = ruamel.yaml.YAML() with open(self._file_path, "r") as file: - line = file.readline() - 
while line != "": - lines += f"{line}" - line = file.readline() + inventory = yaml.load(file) + result = inventory["inventory"] if delete_tmp: self._delete_temp() - return literal_string(lines) + return literal_string(result) -class InventoryTempHandling(TempFileHandling): +class GroupsTempHandling(TempFileHandling): def __init__(self, file_path: str): super().__init__(file_path) def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with groups configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ self._save_temp(document) - yaml = ruamel.yaml.YAML() + lines = "" with open(self._file_path, "r") as file: - inventory = yaml.load(file) - result = inventory["inventory"] + line = file.readline() + while line != "": + lines += line + line = file.readline() if delete_tmp: self._delete_temp() - return literal_string(result) \ No newline at end of file + return literal_string(lines) diff --git a/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py b/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py index 7093d47..fd8e51a 100644 --- a/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py +++ b/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py @@ -8,10 +8,11 @@ CHANGES_INTERVAL_SECONDS = 300 -TMP_FILE_PREFIX = "sc4snmp_ui" +TMP_FILE_PREFIX = "sc4snmp_ui_" TMP_DIR = "/tmp" VALUES_DIRECTORY = os.getenv("VALUES_DIRECTORY", "") VALUES_FILE = os.getenv("VALUES_FILE", "") +KEEP_TEMP_FILES = os.getenv("KEEP_TEMP_FILES", "false") mongo_config_collection = mongo_client.sc4snmp.config_collection mongo_groups = mongo_client.sc4snmp.groups_ui mongo_inventory = mongo_client.sc4snmp.inventory_ui @@ -44,28 +45,43 @@ def handle(self, request: dict): class SaveConfigToFileHandler(AbstractHandler): def handle(self, request: dict): """ + SaveConfigToFileHandler saves current configuration of profiles, groups and 
inventory from mongo + to files on the host machine. - :yaml_sections = { - "": (mongo_collection, MongoToYamlDictConversion, TempFileHandling) + :param request: dictionary with at least one key "yaml_sections". Under this key there should be a dictionary + with the following structure + { + "key.to.section": (mongo_collection, MongoToYamlDictConversion, TempFileHandling) + } + where: + - "key.to.section": a key to section of values.yaml file that should be updated (e.g. "scheduler.profiles") + - mongo_collection: mongo collection with configuration of given section + - MongoToYamlDictConversion: implementation of this abstract class + - TempFileHandling: implementation of this abstract class """ - if len(VALUES_DIRECTORY) == 0: - raise ValueError("VALUES_DIRECTORY must be provided.") yaml = ruamel.yaml.YAML() values_file_resolved = True values_file_path = os.path.join(VALUES_DIRECTORY, VALUES_FILE) + if len(VALUES_FILE) == 0 or (VALUES_FILE.split(".")[1] != "yaml" and VALUES_FILE.split(".")[1] != "yml") or \ not os.path.exists(os.path.join(VALUES_DIRECTORY, VALUES_FILE)): + # If VALUES_FILE can't be found or wasn't provided, it won't be updated. In this case separate files + # with configuration of specific section will be saved in the host machine. 
values_file_resolved = False values = {} if values_file_resolved: with open(values_file_path, "r") as file: values = yaml.load(file) + if not values_file_resolved or KEEP_TEMP_FILES.lower() in ["t", "true", "y", "yes", "1"]: + delete_temp_files = False + else: + delete_temp_files = True + for key, value in request["yaml_sections"].items(): tmp_file_name = TMP_FILE_PREFIX + key.replace(".", "_") + ".yaml" - directory = VALUES_DIRECTORY if not values_file_resolved else TMP_DIR + directory = VALUES_DIRECTORY if not delete_temp_files else TMP_DIR tmp_file_path = os.path.join(directory, tmp_file_name) mongo_collection = value[0] @@ -74,8 +90,9 @@ def handle(self, request: dict): documents = list(mongo_collection.find()) converted = mongo_to_yaml_conversion.convert(documents) - parsed_values = tmp_file_handling.parse_dict_to_yaml(converted, values_file_resolved) + parsed_values = tmp_file_handling.parse_dict_to_yaml(converted, delete_temp_files) + # update appropriate section values dictionary values_keys = key.split(".") sub_dict = values for value_index, value_key in enumerate(values_keys): diff --git a/backend/tests/common/test_conversions.py b/backend/tests/common/test_backend_ui_conversions.py similarity index 100% rename from backend/tests/common/test_conversions.py rename to backend/tests/common/test_backend_ui_conversions.py diff --git a/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py b/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py index f899cfb..58cebec 100644 --- a/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py +++ b/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py @@ -1,24 +1,191 @@ from unittest import mock from unittest.mock import call from bson import ObjectId +from copy import copy +import ruamel import datetime +import os +from SC4SNMP_UI_backend.apply_changes.handling_chain import TMP_FILE_PREFIX + +VALUES_TEST_DIRECTORY = 
os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../yamls_for_tests/values_test") +REFERENCE_FILES_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../yamls_for_tests/reference_files") + +def return_generated_and_reference_files(): + reference_files_names = ["poller_inventory.yaml", "scheduler_profiles.yaml", "scheduler_groups.yaml"] + reference_files = [] + generated_files = [] + yaml = ruamel.yaml.YAML() + + for file_name in reference_files_names: + # add temporary files + reference_file_path = os.path.join(REFERENCE_FILES_DIRECTORY, file_name) + with open(reference_file_path, "r") as file: + data = yaml.load(file) + reference_files.append(copy(data)) + + generated_file_path = os.path.join(VALUES_TEST_DIRECTORY, f"{TMP_FILE_PREFIX}{file_name}") + with open(generated_file_path, "r") as file: + data = yaml.load(file) + generated_files.append(copy(data)) + + # add values files + edited_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values.yaml") + original_values_path = os.path.join(REFERENCE_FILES_DIRECTORY, "values.yaml") + with open(original_values_path, "r") as file: + data = yaml.load(file) + reference_files.append(copy(data)) + with open(edited_values_path, "r") as file: + data = yaml.load(file) + generated_files.append(copy(data)) + return reference_files, generated_files + +def delete_generated_files(): + reference_files_names = ["poller_inventory.yaml", "scheduler_profiles.yaml", "scheduler_groups.yaml"] + for file_name in reference_files_names: + generated_file_path = os.path.join(VALUES_TEST_DIRECTORY, f"{TMP_FILE_PREFIX}{file_name}") + if os.path.exists(generated_file_path): + os.remove(generated_file_path) + +def reset_generated_values(): + edited_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values.yaml") + original_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values-before-edit.yaml") + yaml = ruamel.yaml.YAML() + with open(original_values_path, "r") as file: + original_data = yaml.load(file) + 
with open(edited_values_path, "w") as file: + yaml.dump(original_data, file) common_id = "635916b2c8cb7a15f28af40a" -@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.datetime") -@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.run_job") +groups_collection = [ + { + "_id": ObjectId(common_id), + "group1": [ + {"address": "52.14.243.157", "port": 1163}, + {"address": "20.14.10.0", "port": 161}, + ], + }, + { + "_id": ObjectId(common_id), + "group2": [ + {"address": "0.10.20.30"}, + {"address": "52.14.243.157", "port": 1165, "version": "3", "secret": "mysecret", "security_engine": "aabbccdd1234"}, + ] + } +] + +profiles_collection = [ + { + "_id": ObjectId(common_id), + "single_metric":{ + "frequency": 60, + "varBinds":[['IF-MIB', 'ifMtu', '1']] + } + }, + { + "_id": ObjectId(common_id), + "small_walk":{ + "condition":{ + "type": "walk" + }, + "varBinds":[['IP-MIB'],['IF-MIB']] + } + }, + { + "_id": ObjectId(common_id), + "gt_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "gt", "value": 1} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "lt_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "lt", "value": 2} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "in_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifDescr", "operation": "in", "value": ["eth0", "test value"]} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "multiple_conditions":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "gt", "value": 1}, + {"field": "IF-MIB.ifDescr", "operation": "in", "value": ["eth0", "test value"]} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards'],['IF-MIB', 'ifOutErrors'],['IF-MIB', 'ifOutOctets']] + } + } +] + +inventory_collection = [ + { + "_id": ObjectId(common_id), + "address": "1.1.1.1", + 
"port": 161, + "version": "2c", + "community": "public", + "secret": "", + "security_engine": "", + "walk_interval": 1800, + "profiles": "small_walk;in_profile", + "smart_profiles": True, + "delete": False + }, + { + "_id": ObjectId(common_id), + "address": "group1", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "security_engine": "", + "walk_interval": 1800, + "profiles": "single_metric;multiple_conditions", + "smart_profiles": False, + "delete": False + } +] + +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_FILE", "values.yaml") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.KEEP_TEMP_FILES", "true") +@mock.patch("datetime.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") @mock.patch("pymongo.collection.Collection.update_one") @mock.patch("pymongo.collection.Collection.find") def test_apply_changes_first_call(m_find, m_update, m_run_job, m_datetime, client): datetime_object = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) - m_datetime.datetime.utcnow = mock.Mock(return_value=datetime_object) + m_datetime.utcnow = mock.Mock(return_value=datetime_object) collection = { "_id": ObjectId(common_id), "previous_job_start_time": None, "currently_scheduled": False } m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler [collection], [collection], [collection] @@ -43,11 +210,17 @@ def test_apply_changes_first_call(m_find, m_update, m_run_job, m_datetime, clien m_find.assert_has_calls(calls_find) m_update.assert_has_calls(calls_update) m_run_job.apply_async.assert_has_calls(apply_async_calls) - assert response.json == {"message": 
"Configuration will be updated in approximately 300 seconds"} - + assert response.json == {"message": "Configuration will be updated in approximately 300 seconds."} + reference_files, generated_files = return_generated_and_reference_files() + for ref_f, gen_f in zip(reference_files, generated_files): + assert ref_f == gen_f + delete_generated_files() + reset_generated_values() -@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.datetime") -@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.run_job") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") @mock.patch("pymongo.collection.Collection.update_one") @mock.patch("pymongo.collection.Collection.find") def test_apply_changes_job_currently_scheduled(m_find, m_update, m_run_job, m_datetime, client): @@ -60,6 +233,9 @@ def test_apply_changes_job_currently_scheduled(m_find, m_update, m_run_job, m_da "currently_scheduled": True } m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler [collection], [collection], [collection] @@ -75,11 +251,15 @@ def test_apply_changes_job_currently_scheduled(m_find, m_update, m_run_job, m_da response = client.post("/apply-changes") m_find.assert_has_calls(calls_find) assert not m_run_job.apply_async.called - assert response.json == {"message": "Configuration will be updated in approximately 130 seconds"} + assert response.json == {"message": "Configuration will be updated in approximately 130 seconds."} + delete_generated_files() + reset_generated_values() -@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.datetime") 
-@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.run_job") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") @mock.patch("pymongo.collection.Collection.update_one") @mock.patch("pymongo.collection.Collection.find") def test_apply_changes_new_job_delay_1(m_find, m_update, m_run_job, m_datetime, client): @@ -92,6 +272,9 @@ def test_apply_changes_new_job_delay_1(m_find, m_update, m_run_job, m_datetime, "currently_scheduled": False } m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler [collection], [collection], [collection] @@ -111,51 +294,6 @@ def test_apply_changes_new_job_delay_1(m_find, m_update, m_run_job, m_datetime, response = client.post("/apply-changes") m_find.assert_has_calls(calls_find) m_run_job.apply_async.assert_has_calls(apply_async_calls) - assert response.json == {"message": "Configuration will be updated in approximately 1 seconds"} - - -@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.datetime") -@mock.patch("SC4SNMP_UI_backend.apply_changes.handle_changes.run_job") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_apply_changes_new_job_delay_when_previous_failed(m_find, m_update, m_run_job, m_datetime, client): - datetime_object_old = datetime.datetime(2020, 7, 10, 10, 20, 0, 0) - datetime_object_new = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) - m_datetime.datetime.utcnow = mock.Mock(return_value=datetime_object_new) - collection_failed = { - "_id": ObjectId(common_id), - "previous_job_start_time": datetime_object_old, - 
"currently_scheduled": True - } - collection_updated = { - "_id": ObjectId(common_id), - "previous_job_start_time": datetime_object_old, - "currently_scheduled": False - } - - m_find.side_effect = [ - [collection_failed], - [collection_failed], - [collection_updated] - ] - calls_find = [ - call(), - call(), - call() - ] - calls_update = [ - call({"_id": ObjectId(common_id)}, {"$set": {"currently_scheduled": False}}), - call({"_id": ObjectId(common_id)},{"$set": {"currently_scheduled": True}}) - ] - apply_async_calls = [ - call(countdown=1, queue='apply_changes') - ] - - m_run_job.apply_async.return_value = None - m_update.return_value = None - - response = client.post("/apply-changes") - m_find.assert_has_calls(calls_find) - m_update.assert_has_calls(calls_update) - m_run_job.apply_async.assert_has_calls(apply_async_calls) - assert response.json == {"message": "Configuration will be updated in approximately 1 seconds"} \ No newline at end of file + assert response.json == {"message": "Configuration will be updated in approximately 1 seconds."} + delete_generated_files() + reset_generated_values() diff --git a/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml b/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml new file mode 100644 index 0000000..6e18d10 --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml @@ -0,0 +1,4 @@ +inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;in_profile,t,f + group1,1161,2c,public,,,1800,single_metric;multiple_conditions,f,f diff --git a/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml b/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml new file mode 100644 index 0000000..4017b3c --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml @@ -0,0 +1,12 @@ +group1: +- address: 52.14.243.157 + port: 
1163 +- address: 20.14.10.0 + port: 161 +group2: +- address: 0.10.20.30 +- address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + security_engine: aabbccdd1234 diff --git a/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml b/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml new file mode 100644 index 0000000..151a2ad --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml @@ -0,0 +1,51 @@ +single_metric: + frequency: 60 + varBinds: + - ['IF-MIB', 'ifMtu', '1'] +small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] +gt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + - ['IF-MIB', 'ifOutErrors'] + - ['IF-MIB', 'ifOutOctets'] \ No newline at end of file diff --git a/backend/tests/yamls_for_tests/reference_files/values.yaml b/backend/tests/yamls_for_tests/reference_files/values.yaml new file mode 100644 index 0000000..8bc3a57 --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/values.yaml @@ -0,0 +1,161 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 
+ insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + group1: + - address: 52.14.243.157 + port: 1163 + - address: 20.14.10.0 + port: 161 + group2: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + security_engine: aabbccdd1234 + profiles: | + single_metric: + frequency: 60 + varBinds: + - ['IF-MIB', 'ifMtu', '1'] + small_walk: + condition: + type: walk + varBinds: 
+ - ['IP-MIB'] + - ['IF-MIB'] + gt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + - ['IF-MIB', 'ifOutErrors'] + - ['IF-MIB', 'ifOutOctets'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;in_profile,t,f + group1,1161,2c,public,,,1800,single_metric;multiple_conditions,f,f +# group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/backend/tests/yamls_for_tests/values_test/.gitignore b/backend/tests/yamls_for_tests/values_test/.gitignore new file mode 100644 index 0000000..0372f75 --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/.gitignore @@ -0,0 +1 @@ +sc4snmp_ui_*.yaml diff --git a/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml b/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml new file mode 100644 index 0000000..fc5bebf --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml @@ -0,0 +1,139 @@ +UI: + 
enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + some_group: + - address: 0.10.20.30 + - address: 
52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + - address: 10.1.3.157 + port: 1165 + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + conditional_profile_greater_than: + frequency: 100 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;conditional_profile_greater_than,t,f + some_group,1161,2c,public,,,1800,single_metric;in_profile,f,t + 156.0.10.91,161,2c,public,,,1800,conditional_profile_greater_than,t,t +# group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/backend/tests/yamls_for_tests/values_test/values.yaml b/backend/tests/yamls_for_tests/values_test/values.yaml new file mode 100644 index 0000000..6b88a85 --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/values.yaml @@ -0,0 +1,139 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 
'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + some_group: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + - address: 10.1.3.157 + port: 1165 + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + conditional_profile_greater_than: + frequency: 100 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + 
- ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;conditional_profile_greater_than,t,f + some_group,1161,2c,public,,,1800,single_metric;in_profile,f,t + 156.0.10.91,161,2c,public,,,1800,conditional_profile_greater_than,t,t +# group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true