diff --git a/.github/workflows/ci-build-backend.yaml b/.github/workflows/ci-build-backend.yaml
deleted file mode 100644
index 9641d2e..0000000
--- a/.github/workflows/ci-build-backend.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-# ########################################################################
-# Copyright 2021 Splunk Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ########################################################################
-
-name: ci-build-backend
-on:
-  pull_request:
-    branches:
-      - "main"
-      - "develop"
-      - "next"
-  push:
-    branches:
-      - "main"
-      - "develop"
-      - "next"
-    tags-ignore:
-      - "v*"
-
-jobs:
-  release:
-    name: Release
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          submodules: false
-          persist-credentials: false
-
-      #Build docker images
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to GitHub Packages Docker Registry
-        uses: docker/login-action@v1.9.0
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Docker meta
-        id: docker_meta
-        uses: docker/metadata-action@v3
-        with:
-          images: ghcr.io/splunk/sc4snmp-ui/backend/container
-          tags: |
-            type=semver,pattern=v{{major}}.{{minor}}
-            type=semver,pattern=v{{major}}
-            type=semver,pattern=v{{version}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=semver,pattern={{major}}
-            type=semver,pattern={{version}}
-            type=ref,event=branch
-            type=ref,event=pr
-      - name: Build and push action - backend
-        id: docker_action_build_backend
-        uses: docker/build-push-action@v2
-        with:
-          context: backend
-          push: true
-          platforms: linux/amd64,linux/arm64
-          tags: ${{ steps.docker_meta.outputs.tags }}
-          labels: ${{ steps.docker_meta.outputs.labels }}
-          cache-to: type=inline
-      - uses: actions/setup-node@v2
-        with:
-          node-version: "14"
-
diff --git a/.github/workflows/ci-build-frontend.yaml b/.github/workflows/ci-build.yaml
similarity index 57%
rename from .github/workflows/ci-build-frontend.yaml
rename to .github/workflows/ci-build.yaml
index f9982d9..ed2b9b0 100644
--- a/.github/workflows/ci-build-frontend.yaml
+++ b/.github/workflows/ci-build.yaml
@@ -14,7 +14,7 @@
 # limitations under the License.
 # ########################################################################

-name: ci-build-frontend
+name: ci-build
 on:
   pull_request:
     branches:
@@ -30,9 +30,12 @@ on:
       - "v*"

 jobs:
-  release:
-    name: Release
+  build-frontend:
+    name: build-frontend
     runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
     steps:
       - uses: actions/checkout@v2
         with:
@@ -50,7 +53,7 @@ jobs:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Docker meta
+      - name: Docker meta - frontend
         id: docker_meta
         uses: docker/metadata-action@v3
         with:
@@ -63,13 +66,12 @@ jobs:
             type=semver,pattern={{major}}
             type=semver,pattern={{version}}
             type=ref,event=branch
-            type=ref,event=pr
       - name: Build and push action - frontend
         id: docker_action_build_frontend
         uses: docker/build-push-action@v2
         with:
           context: frontend
-          push: true
+          push: false
           platforms: linux/amd64,linux/arm64
           tags: ${{ steps.docker_meta.outputs.tags }}
           labels: ${{ steps.docker_meta.outputs.labels }}
@@ -78,3 +80,52 @@ jobs:
       - uses: actions/setup-node@v2
         with:
           node-version: "14"
+
+  build-backend:
+    name: build-backend
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: false
+          persist-credentials: false
+
+      #Build docker images
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to GitHub Packages Docker Registry
+        uses: docker/login-action@v1.9.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Docker meta - backend
+        id: docker_meta
+        uses: docker/metadata-action@v3
+        with:
+          images: ghcr.io/splunk/sc4snmp-ui/backend/container
+          tags: |
+            type=semver,pattern=v{{major}}.{{minor}}
+            type=semver,pattern=v{{major}}
+            type=semver,pattern=v{{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            type=semver,pattern={{version}}
+            type=ref,event=branch
+      - name: Build and push action - backend
+        id: docker_action_build_backend
+        uses: docker/build-push-action@v2
+        with:
+          context: backend
+          push: false
+          platforms: linux/amd64,linux/arm64
+          tags: ${{ steps.docker_meta.outputs.tags }}
+          labels: ${{ steps.docker_meta.outputs.labels }}
+          cache-to: type=inline
+      - uses: actions/setup-node@v2
+        with:
+          node-version: "14"
diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml
new file mode 100644
index 0000000..2107136
--- /dev/null
+++ b/.github/workflows/ci-main.yaml
@@ -0,0 +1,80 @@
+# ########################################################################
+# Copyright 2021 Splunk Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ########################################################################
+
+name: ci-main
+on:
+  push:
+    branches:
+      - "main"
+      - "develop"
+      - "next"
+    tags-ignore:
+      - "v*"
+  pull_request:
+    branches:
+      - "main"
+      - "develop"
+      - "next"
+jobs:
+  test-unit-backend:
+    name: Test Backend Unit Python ${{ matrix.python-version }}
+    runs-on: ubuntu-latest
+    env:
+      VALUES_DIRECTORY: /tmp
+    strategy:
+      matrix:
+        python-version:
+          - 3.9
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup python
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install packages
+        working-directory: ./backend
+        run: pip install -r ./requirements.txt
+      - name: Run Pytest
+        working-directory: ./backend
+        run: pytest
+  test-unit-frontend:
+    name: Test Frontend Unit Node ${{ matrix.node-version }}
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        node-version:
+          - 16
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set Node.js ${{ matrix.node-version }}
+        uses: actions/setup-node@v3
+        with:
+          node-version: ${{ matrix.node-version }}
+      - name: Run install
+        uses: borales/actions-yarn@v4
+        with:
+          cmd: install
+          dir: 'frontend'
+      - name: Build
+        uses: borales/actions-yarn@v4
+        with:
+          cmd: build
+          dir: 'frontend'
+      - name: Run test in sub-folder
+        uses: borales/actions-yarn@v4
+        with:
+          cmd: test
+          dir: 'frontend'
\ No newline at end of file
diff --git a/.github/workflows/ci-release.yaml b/.github/workflows/ci-release.yaml
new file mode 100644
index 0000000..10b5c30
--- /dev/null
+++ b/.github/workflows/ci-release.yaml
@@ -0,0 +1,153 @@
+# ########################################################################
+# Copyright 2021 Splunk Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ########################################################################
+
+name: ci-release
+on:
+  push:
+    branches:
+      - "main"
+      - "develop"
+      - "next"
+    tags-ignore:
+      - "v*"
+
+jobs:
+  build-frontend:
+    name: build-frontend
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: false
+          persist-credentials: false
+
+      #Build docker images
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to GitHub Packages Docker Registry
+        uses: docker/login-action@v1.9.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Docker meta - frontend
+        id: docker_meta
+        uses: docker/metadata-action@v3
+        with:
+          images: ghcr.io/splunk/sc4snmp-ui/frontend/container
+          tags: |
+            type=semver,pattern=v{{major}}.{{minor}}
+            type=semver,pattern=v{{major}}
+            type=semver,pattern=v{{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            type=semver,pattern={{version}}
+            type=ref,event=branch
+      - name: Build and push action - frontend
+        id: docker_action_build_frontend
+        uses: docker/build-push-action@v2
+        with:
+          context: frontend
+          push: true
+          platforms: linux/amd64,linux/arm64
+          tags: ${{ steps.docker_meta.outputs.tags }}
+          labels: ${{ steps.docker_meta.outputs.labels }}
+          cache-to: type=inline
+      - uses: actions/setup-node@v2
+        with:
+          node-version: "14"
+
+  build-backend:
+    name: build-backend
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: false
+          persist-credentials: false
+
+      #Build docker images
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to GitHub Packages Docker Registry
+        uses: docker/login-action@v1.9.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Docker meta - backend
+        id: docker_meta
+        uses: docker/metadata-action@v3
+        with:
+          images: ghcr.io/splunk/sc4snmp-ui/backend/container
+          tags: |
+            type=semver,pattern=v{{major}}.{{minor}}
+            type=semver,pattern=v{{major}}
+            type=semver,pattern=v{{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            type=semver,pattern={{version}}
+            type=ref,event=branch
+      - name: Build and push action - backend
+        id: docker_action_build_backend
+        uses: docker/build-push-action@v2
+        with:
+          context: backend
+          push: true
+          platforms: linux/amd64,linux/arm64
+          tags: ${{ steps.docker_meta.outputs.tags }}
+          labels: ${{ steps.docker_meta.outputs.labels }}
+          cache-to: type=inline
+      - uses: actions/setup-node@v2
+        with:
+          node-version: "14"
+  release:
+    name: Release
+    needs: [build-frontend, build-backend]
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: false
+          persist-credentials: false
+      - uses: actions/setup-node@v2
+        with:
+          node-version: "14"
+      - name: Semantic Release
+        id: version
+        uses: cycjimmy/semantic-release-action@v3.2.0
+        with:
+          semantic_version: 17
+          extra_plugins: |
+            @semantic-release/exec
+            @semantic-release/git
+            @google/semantic-release-replace-plugin@1.2.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }}
+
diff --git a/.releaserc b/.releaserc
new file mode 100644
index 0000000..1b8fd16
--- /dev/null
+++ b/.releaserc
@@ -0,0 +1,86 @@
+#
+# Copyright 2021 Splunk Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{
+  "branches":
+    [
+      "+([0-9])?(.{+([0-9]),x}).x",
+      "main",
+      { name: "next", channel: "alpha", prerelease: "alpha" },
+      { name: "develop", channel: "beta", prerelease: "beta" },
+    ],
+  plugins:
+    [
+      "@semantic-release/commit-analyzer",
+      [
+        "@google/semantic-release-replace-plugin",
+        {
+          "replacements": [
+            {
+              "files": ["backend/SC4SNMP_UI_backend/__init__.py"],
+              "from": "__version__ ?=.*",
+              "to": "__version__ = \"${nextRelease.version}\"",
+              "results": [
+                {
+                  "file": "backend/SC4SNMP_UI_backend/__init__.py",
+                  "hasChanged": true,
+                  "numMatches": 1,
+                  "numReplacements": 1
+                }
+              ],
+              "countMatches": true
+            },
+            {
+              "files": ["frontend/packages/manager/package.json"],
+              "from": ".*\"version\":.*",
+              "to": "  \"version\": \"${nextRelease.version}\",",
+              "results": [
+                {
+                  "file": "frontend/packages/manager/package.json",
+                  "hasChanged": true,
+                  "numMatches": 1,
+                  "numReplacements": 1
+                }
+              ],
+              "countMatches": true
+            },
+            {
+              "files": ["frontend/lerna.json"],
+              "from": ".*\"version\":.*",
+              "to": "  \"version\": \"${nextRelease.version}\",",
+              "results": [
+                {
+                  "file": "frontend/lerna.json",
+                  "hasChanged": true,
+                  "numMatches": 1,
+                  "numReplacements": 1
+                }
+              ],
+              "countMatches": true
+            }
+          ]
+        }
+      ],
+      "@semantic-release/release-notes-generator",
+      [
+        "@semantic-release/git",
+        {
+          "assets": ["NOTICE", "frontend/lerna.json", "frontend/packages/manager/package.json", "backend/SC4SNMP_UI_backend/__init__.py"],
+          "message": "chore(release): ${nextRelease.version}\n\n${nextRelease.notes}",
+        },
+      ],
+      ["@semantic-release/github", { "assets": ["NOTICE"] }],
+    ],
+}
diff --git a/backend/Dockerfile b/backend/Dockerfile
index 49eb52e..d1d734c 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -6,5 +6,14 @@
 COPY SC4SNMP_UI_backend ./SC4SNMP_UI_backend
 RUN pip install -r ./requirements.txt
 ENV FLASK_DEBUG production
+
+COPY ./flask_start.sh /flask_start.sh
+RUN chmod +x /flask_start.sh
+
+COPY ./celery_start.sh /celery_start.sh
+RUN chmod +x /celery_start.sh
+
+USER 10000:10000
+
 EXPOSE 5000
-CMD ["gunicorn", "-b", ":5000", "app:app", "--log-level", "DEBUG"]
\ No newline at end of file
+CMD ["gunicorn", "-b", ":5000", "app:flask_app", "--log-level", "INFO"]
\ No newline at end of file
diff --git a/backend/SC4SNMP_UI_backend/__init__.py b/backend/SC4SNMP_UI_backend/__init__.py
index abcd480..bda44d3 100644
--- a/backend/SC4SNMP_UI_backend/__init__.py
+++ b/backend/SC4SNMP_UI_backend/__init__.py
@@ -2,25 +2,63 @@
 from pymongo import MongoClient
 import os
 import logging
+from celery import Celery
+from celery import Task
+from dotenv import load_dotenv

-try:
-    from dotenv import load_dotenv
+load_dotenv()

-    load_dotenv()
-except:
-    pass
+__version__ = "1.0.0-beta.18"

 MONGO_URI = os.getenv("MONGO_URI")
 mongo_client = MongoClient(MONGO_URI)
+CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "amqp://guest:guest@localhost:5672//")
+REDIS_URL = os.getenv("REDIS_URL")
+VALUES_DIRECTORY = os.getenv("VALUES_DIRECTORY", "")
+KEEP_TEMP_FILES = os.getenv("KEEP_TEMP_FILES", "false")
+
+class NoValuesDirectoryException(Exception):
+    pass

 def create_app():
+    if len(VALUES_DIRECTORY) == 0:
+        raise NoValuesDirectoryException
+
     app = Flask(__name__)
-    from SC4SNMP_UI_backend.ui_handling.routes import ui
-    app.register_blueprint(ui)
+    app.config.from_mapping(
+        CELERY=dict(
+            task_default_queue="apply_changes",
+            broker_url=CELERY_BROKER_URL,
+            beat_scheduler="redbeat.RedBeatScheduler",
+            redbeat_redis_url=REDIS_URL,
+            task_ignore_result=True,
+            redbeat_lock_key=None,
+        ),
+    )
+    celery_init_app(app)
+    from SC4SNMP_UI_backend.profiles.routes import profiles_blueprint
+    from SC4SNMP_UI_backend.groups.routes import groups_blueprint
+    from SC4SNMP_UI_backend.inventory.routes import inventory_blueprint
+    from SC4SNMP_UI_backend.apply_changes.routes import apply_changes_blueprint
+    app.register_blueprint(profiles_blueprint)
+    app.register_blueprint(groups_blueprint)
+    app.register_blueprint(inventory_blueprint)
+    app.register_blueprint(apply_changes_blueprint)

     gunicorn_logger = logging.getLogger('gunicorn.error')
     app.logger.handlers = gunicorn_logger.handlers
     app.logger.setLevel(gunicorn_logger.level)

     return app
+
+def celery_init_app(app: Flask) -> Celery:
+    class FlaskTask(Task):
+        def __call__(self, *args: object, **kwargs: object) -> object:
+            with app.app_context():
+                return self.run(*args, **kwargs)
+
+    celery_app = Celery(app.name, task_cls=FlaskTask)
+    celery_app.config_from_object(app.config["CELERY"])
+    celery_app.set_default()
+    app.extensions["celery"] = celery_app
+    return celery_app
diff --git a/backend/SC4SNMP_UI_backend/ui_handling/__init__.py b/backend/SC4SNMP_UI_backend/apply_changes/__init__.py
similarity index 100%
rename from backend/SC4SNMP_UI_backend/ui_handling/__init__.py
rename to backend/SC4SNMP_UI_backend/apply_changes/__init__.py
diff --git a/backend/SC4SNMP_UI_backend/apply_changes/apply_changes.py b/backend/SC4SNMP_UI_backend/apply_changes/apply_changes.py
new file mode 100644
index 0000000..b220f9d
--- /dev/null
+++ b/backend/SC4SNMP_UI_backend/apply_changes/apply_changes.py
@@ -0,0 +1,69 @@
+from threading import Lock
+import os
+from SC4SNMP_UI_backend import mongo_client
+from SC4SNMP_UI_backend.apply_changes.handling_chain import CheckJobHandler, ScheduleHandler, SaveConfigToFileHandler
+from SC4SNMP_UI_backend.apply_changes.config_to_yaml_utils import ProfilesToYamlDictConversion, ProfilesTempHandling, \
+    GroupsToYamlDictConversion, GroupsTempHandling, InventoryToYamlDictConversion, InventoryTempHandling
+
+
+MONGO_URI = os.getenv("MONGO_URI")
+JOB_CREATION_RETRIES = int(os.getenv("JOB_CREATION_RETRIES", 10))
+mongo_config_collection = mongo_client.sc4snmp.config_collection
+mongo_groups = mongo_client.sc4snmp.groups_ui
+mongo_inventory = mongo_client.sc4snmp.inventory_ui
+mongo_profiles = mongo_client.sc4snmp.profiles_ui
+
+
+class SingletonMeta(type):
+    _instances = {}
+    _lock: Lock = Lock()
+
+    def __call__(cls, *args, **kwargs):
+        with cls._lock:
+            if cls not in cls._instances:
+                instance = super().__call__(*args, **kwargs)
+                cls._instances[cls] = instance
+        return cls._instances[cls]
+
+class ApplyChanges(metaclass=SingletonMeta):
+    def __init__(self) -> None:
+        """
+        ApplyChanges is a singleton responsible for creating a mongo record with the current state of the kubernetes job.
+        Structure of the record:
+        {
+            "previous_job_start_time": datetime.datetime or None if the job hasn't been scheduled yet,
+            "currently_scheduled": bool
+        }
+        """
+        self.__handling_chain = SaveConfigToFileHandler()
+        check_job_handler = CheckJobHandler()
+        schedule_handler = ScheduleHandler()
+        self.__handling_chain.set_next(check_job_handler).set_next(schedule_handler)
+        mongo_config_collection.update_one(
+            {
+                "previous_job_start_time": {"$exists": True},
+                "currently_scheduled": {"$exists": True}
+            },
+            {
+                "$set": {
+                    "previous_job_start_time": None,
+                    "currently_scheduled": False
+                }
+            },
+            upsert=True
+        )
+
+    def apply_changes(self):
+        """
+        Run the chain of actions to schedule a new kubernetes job.
+        """
+        yaml_sections = {
+            "scheduler.groups": (mongo_groups, GroupsToYamlDictConversion, GroupsTempHandling),
+            "scheduler.profiles": (mongo_profiles, ProfilesToYamlDictConversion, ProfilesTempHandling),
+            "poller.inventory": (mongo_inventory, InventoryToYamlDictConversion, InventoryTempHandling)
+        }
+        return self.__handling_chain.handle({
+            "yaml_sections": yaml_sections
+        })
diff --git a/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py b/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py
new file mode 100644
index 0000000..2ed1900
--- /dev/null
+++ b/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py
@@ -0,0 +1,232 @@
+from abc import abstractmethod
+import ruamel
+from ruamel.yaml.scalarstring import SingleQuotedScalarString as single_quote
+from ruamel.yaml.scalarstring import DoubleQuotedScalarString as double_quote
+from SC4SNMP_UI_backend.common.backend_ui_conversions import get_group_or_profile_name_from_backend
+from ruamel.yaml.scalarstring import LiteralScalarString as literal_string
+import os
+from flask import current_app
+
+
+def bool_to_str(value):
+    if value:
+        return "t"
+    else:
+        return "f"
+
+
+class MongoToYamlDictConversion:
+    """
+    MongoToYamlDictConversion is an abstract class. Implementations of this class convert
+    the appropriate mongo collections to dictionaries in such a way that configurations from those collections can be
+    dumped to a yaml file with appropriate formatting.
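+    The scalar helpers imported above (single_quote, double_quote, literal_string) preserve explicit
+    quoting and literal block style in the dumped yaml.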
+ """ + @classmethod + def yaml_escape_list(cls, *l): + """ + This function is used to parse an example list [yaml_escape_list(el1, el2, el3)] like this: + - [el1, el2, el3] + and not like this: + - el1 + - el2 + - el3 + """ + ret = ruamel.yaml.comments.CommentedSeq(l) + ret.fa.set_flow_style() + return ret + @abstractmethod + def convert(self, documents: list) -> dict: + pass + + +class ProfilesToYamlDictConversion(MongoToYamlDictConversion): + def convert(self, documents: list) -> dict: + """ + ProfilesToYamlDictConversion converts profiles from mongo collection to + format that can be dumped to yaml file + :param documents: list of profiles from mongo + :return: dictionary that can be dumped to yaml + """ + result = {} + for profile in documents: + profile_name = get_group_or_profile_name_from_backend(profile) + prof = profile[profile_name] + var_binds = [] + condition = None + conditions = None + is_walk_profile = False + + for var_bind in prof["varBinds"]: + var_binds.append(self.yaml_escape_list(*[single_quote(vb) for vb in var_bind])) + + if "condition" in prof: + backend_condition = prof["condition"] + condition_type = backend_condition["type"] + is_walk_profile = True if backend_condition["type"] == "walk" else False + condition = { + "type": condition_type + } + if condition_type == "field": + condition["field"] = backend_condition["field"] + condition["patterns"] = [single_quote(pattern) for pattern in backend_condition["patterns"]] + + if "conditions" in prof: + backend_conditions = prof["conditions"] + conditions = [] + for cond in backend_conditions: + if cond["operation"] == "in": + value = [double_quote(v) if type(v) == str else v for v in cond["value"]] + else: + value = double_quote(cond["value"]) if type(cond["value"]) == str else cond["value"] + conditions.append({ + "field": cond["field"], + "operation": double_quote(cond["operation"]), + "value": value + }) + + result[profile_name] = {} + if not is_walk_profile: + result[profile_name]["frequency"] = prof['frequency'] + if condition is not None: + result[profile_name]["condition"] = condition + if conditions is not None: + result[profile_name]["conditions"] = conditions + result[profile_name]["varBinds"] = var_binds + + return result + + +class GroupsToYamlDictConversion(MongoToYamlDictConversion): + def convert(self, documents: list) -> dict: + """ + GroupsToYamlDictConversion converts groups from mongo collection to + format that can be dumped to yaml file + :param documents: list of groups from mongo + :return: dictionary that can be dumped to yaml + """ + result = {} + for group in documents: + group_name = get_group_or_profile_name_from_backend(group) + gr = group[group_name] + hosts = [] + for host in gr: + host_config = host + if "community" in host: + host_config["community"] = single_quote(host["community"]) + if "secret" in host: + host_config["secret"] = single_quote(host["secret"]) + if "version" in host: + host_config["version"] = single_quote(host["version"]) + hosts.append(host_config) + result[group_name] = hosts + return result + + +class InventoryToYamlDictConversion(MongoToYamlDictConversion): + def convert(self, documents: list) -> dict: + """ + InventoryToYamlDictConversion converts inventory from mongo collection to + format that can be dumped to yaml file + :param documents: inventory from mongo + :return: dictionary that can be dumped to yaml + """ + inventory_string = "address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete" + for inv in documents: + 
smart_profiles = bool_to_str(inv['smart_profiles']) + inv_delete = bool_to_str(inv['delete']) + inventory_string += f"\n{inv['address']},{inv['port']},{inv['version']},{inv['community']}," \ + f"{inv['secret']},{inv['security_engine']},{inv['walk_interval']},{inv['profiles']}," \ + f"{smart_profiles},{inv_delete}" + return { + "inventory": literal_string(inventory_string) + } + + +class TempFileHandling: + """ + After converting configurations from mongo to dictionaries ready to be dumped to yaml file, those dictionaries + must be dumped to temporary files. This is because those configurations must be parsed before they are inserted + to values.yaml file. TempFileHandling is an abstract class whose implementations parse dictionaries and return + ready configuration that can be saved in values.yaml + """ + def __init__(self, file_path: str): + self._file_path = file_path + + def _save_temp(self, content): + yaml = ruamel.yaml.YAML() + with open(self._file_path, "w") as file: + yaml.dump(content, file) + + def _delete_temp(self): + if os.path.exists(self._file_path): + os.remove(self._file_path) + else: + current_app.logger.info(f"Directory {self._file_path} doesn't exist inside a Pod. File wasn't removed.") + + @abstractmethod + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + pass + + +class ProfilesTempHandling(TempFileHandling): + def __init__(self, file_path: str): + super().__init__(file_path) + + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with profiles configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ + self._save_temp(document) + lines = "" + with open(self._file_path, "r") as file: + line = file.readline() + while line != "": + lines += line + line = file.readline() + if delete_tmp: + self._delete_temp() + return literal_string(lines) + + +class InventoryTempHandling(TempFileHandling): + def __init__(self, file_path: str): + super().__init__(file_path) + + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with inventory configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ + self._save_temp(document) + yaml = ruamel.yaml.YAML() + with open(self._file_path, "r") as file: + inventory = yaml.load(file) + result = inventory["inventory"] + if delete_tmp: + self._delete_temp() + return literal_string(result) + + +class GroupsTempHandling(TempFileHandling): + def __init__(self, file_path: str): + super().__init__(file_path) + + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with groups configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ + self._save_temp(document) + lines = "" + with open(self._file_path, "r") as file: + line = file.readline() + while line != "": + lines += line + line = file.readline() + if delete_tmp: + self._delete_temp() + return literal_string(lines) diff --git a/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py b/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py new file mode 100644 index 0000000..fd8e51a --- /dev/null +++ b/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py @@ -0,0 +1,169 @@ +from abc import abstractmethod, ABC +import 
ruamel.yaml +from flask import current_app +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.apply_changes.tasks import run_job +import datetime +import os + + +CHANGES_INTERVAL_SECONDS = 300 +TMP_FILE_PREFIX = "sc4snmp_ui_" +TMP_DIR = "/tmp" +VALUES_DIRECTORY = os.getenv("VALUES_DIRECTORY", "") +VALUES_FILE = os.getenv("VALUES_FILE", "") +KEEP_TEMP_FILES = os.getenv("KEEP_TEMP_FILES", "false") +mongo_config_collection = mongo_client.sc4snmp.config_collection +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui +mongo_profiles = mongo_client.sc4snmp.profiles_ui + +class Handler(ABC): + @abstractmethod + def set_next(self, handler): + pass + + @abstractmethod + def handle(self, request): + pass + + +class AbstractHandler(Handler): + _next_handler: Handler = None + + def set_next(self, handler: Handler) -> Handler: + self._next_handler = handler + return handler + + @abstractmethod + def handle(self, request: dict): + if self._next_handler: + return self._next_handler.handle(request) + return None + + +class SaveConfigToFileHandler(AbstractHandler): + def handle(self, request: dict): + """ + SaveConfigToFileHandler saves current configuration of profiles, groups and inventory from mongo + to files on the host machine. + + :param request: dictionary with at least one key "yaml_sections". Under this key there should be dictionary + with the following structure + { + "key.to.section": (mongo_collection, MongoToYamlDictConversion, TempFileHandling) + } + where: + - "key.to.section": a key to section of values.yaml file that should be updated (e.g. "scheduler.profiles") + - mongo_collection: mongo collection with configuration of given section + - MongoToYamlDictConversion: implementation of this abstract class + - TempFileHandling: implementation of this abstract class + """ + + yaml = ruamel.yaml.YAML() + values_file_resolved = True + values_file_path = os.path.join(VALUES_DIRECTORY, VALUES_FILE) + + if len(VALUES_FILE) == 0 or (VALUES_FILE.split(".")[1] != "yaml" and VALUES_FILE.split(".")[1] != "yml") or \ + not os.path.exists(os.path.join(VALUES_DIRECTORY, VALUES_FILE)): + # If VALUES_FILE can't be found or wasn't provided, it won't be updated. In this case separate files + # with configuration of specific section will be saved in the hosts machine. 
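+            # Each section then ends up in its own sc4snmp_ui_<section>.yaml file in VALUES_DIRECTORY
+            # (see the loop below), instead of being merged into values.yaml.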
+            values_file_resolved = False
+
+        values = {}
+        if values_file_resolved:
+            with open(values_file_path, "r") as file:
+                values = yaml.load(file)
+
+        if not values_file_resolved or KEEP_TEMP_FILES.lower() in ["t", "true", "y", "yes", "1"]:
+            delete_temp_files = False
+        else:
+            delete_temp_files = True
+
+        for key, value in request["yaml_sections"].items():
+            tmp_file_name = TMP_FILE_PREFIX + key.replace(".", "_") + ".yaml"
+            directory = VALUES_DIRECTORY if not delete_temp_files else TMP_DIR
+            tmp_file_path = os.path.join(directory, tmp_file_name)
+
+            mongo_collection = value[0]
+            mongo_to_yaml_conversion = value[1]()
+            tmp_file_handling = value[2](tmp_file_path)
+
+            documents = list(mongo_collection.find())
+            converted = mongo_to_yaml_conversion.convert(documents)
+            parsed_values = tmp_file_handling.parse_dict_to_yaml(converted, delete_temp_files)
+
+            # update the appropriate section of the values dictionary
+            values_keys = key.split(".")
+            sub_dict = values
+            for value_index, value_key in enumerate(values_keys):
+                if value_index == len(values_keys)-1:
+                    sub_dict[value_key] = parsed_values
+                else:
+                    sub_dict = sub_dict.get(value_key, {})
+
+        if values_file_resolved:
+            with open(values_file_path, "w") as file:
+                yaml.dump(values, file)
+
+        next_chain_request = {}
+        if "next" in request:
+            next_chain_request = request["next"]
+        return super().handle(next_chain_request)
+
+
+class CheckJobHandler(AbstractHandler):
+    def handle(self, request: dict = None):
+        """
+        CheckJobHandler checks whether a new kubernetes job with the updated sc4snmp configuration can be run
+        immediately or should be scheduled for later.
+
+        :return: passes a dictionary with job_delay in seconds to the next handler
+        """
+        record = list(mongo_config_collection.find())[0]
+        last_update = record["previous_job_start_time"]
+        if last_update is None:
+            # If it's the first time that the job is run (the record in mongo_config_collection has been created
+            # in the ApplyChanges class and the last_update attribute is None), the job delay should be equal to
+            # CHANGES_INTERVAL_SECONDS. Update the mongo record with the job state accordingly.
+            job_delay = CHANGES_INTERVAL_SECONDS
+            mongo_config_collection.update_one({"_id": record["_id"]},
+                                               {"$set": {"previous_job_start_time": datetime.datetime.utcnow()}})
+            # time from the last update
+            time_difference = 0
+        else:
+            # Check how many seconds have elapsed since the last time the job was run. If the time difference
+            # is greater than CHANGES_INTERVAL_SECONDS, the job can be run immediately. Otherwise, calculate how
+            # many seconds are left until the minimum time difference between updates (CHANGES_INTERVAL_SECONDS).
+            current_time = datetime.datetime.utcnow()
+            delta = current_time - last_update
+            time_difference = delta.total_seconds()
+            if time_difference > CHANGES_INTERVAL_SECONDS:
+                job_delay = 1
+            else:
+                job_delay = int(CHANGES_INTERVAL_SECONDS - time_difference)
+
+        result = {
+            "job_delay": job_delay,
+            "time_from_last_update": time_difference
+        }
+
+        current_app.logger.info(f"CheckJobHandler: {result}")
+        return super().handle(result)
+
+
+class ScheduleHandler(AbstractHandler):
+    def handle(self, request: dict):
+        """
+        ScheduleHandler schedules the kubernetes job with the updated sc4snmp configuration
+        """
+        record = list(mongo_config_collection.find())[0]
+        if not record["currently_scheduled"]:
+            # If the task isn't currently scheduled, schedule it and update its state in mongo.
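+            # The run_job task resets "currently_scheduled" to False once the kubernetes job has been
+            # created (or the creation retries are exhausted), which re-enables scheduling.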
+            mongo_config_collection.update_one({"_id": record["_id"]},
+                                               {"$set": {"currently_scheduled": True}})
+            run_job.apply_async(countdown=request["job_delay"], queue='apply_changes')
+            current_app.logger.info(
+                f"ScheduleHandler: scheduling a new task with a delay of {request['job_delay']} seconds.")
+        else:
+            current_app.logger.info("ScheduleHandler: new job wasn't scheduled.")
+        return request["job_delay"], record["currently_scheduled"]
\ No newline at end of file
diff --git a/backend/SC4SNMP_UI_backend/apply_changes/kubernetes_job.py b/backend/SC4SNMP_UI_backend/apply_changes/kubernetes_job.py
new file mode 100644
index 0000000..147d410
--- /dev/null
+++ b/backend/SC4SNMP_UI_backend/apply_changes/kubernetes_job.py
@@ -0,0 +1,189 @@
+from kubernetes import client
+from copy import copy
+from celery.utils.log import get_task_logger
+
+logger = get_task_logger(__name__)
+
+# Functions in this file create different sections of a kubernetes job,
+# based on the job config yaml file from splunk-connect-for-snmp.
+
+def create_container(container: dict):
+    """
+    Create a container object from yaml configuration.
+
+    :param container: Parsed yaml configuration of a single container from
+    the spec.template.spec.containers section.
+    :return: V1Container
+    """
+    name = container["name"]
+    image = container["image"]
+    image_pull_policy = container["imagePullPolicy"]
+    args = container["args"]
+    env = []
+    for e in container["env"]:
+        env_var = client.V1EnvVar(name=e["name"],
+                                  value=e["value"])
+        env.append(copy(env_var))
+    volume_mounts = []
+    for v in container["volumeMounts"]:
+        vol = client.V1VolumeMount(name=v["name"],
+                                   mount_path=v["mountPath"],
+                                   read_only=v["readOnly"])
+        volume_mounts.append(copy(vol))
+    container_object = client.V1Container(name=name,
+                                          image=image,
+                                          image_pull_policy=image_pull_policy,
+                                          args=args,
+                                          env=env,
+                                          volume_mounts=volume_mounts)
+    return container_object
+
+
+def create_volume(volume: dict):
+    """
+    Create a volume object from yaml configuration.
+
+    :param volume: Parsed yaml configuration of a single volume from
+    the spec.template.spec.volumes section.
+    :return: V1Volume
+    """
+    name = volume["name"]
+    if "configMap" in volume:
+        config_map_name = volume["configMap"]["name"]
+        items = []
+        for i in volume["configMap"]["items"]:
+            item = client.V1KeyToPath(key=i["key"],
+                                      path=i["path"])
+            items.append(copy(item))
+        config_map = client.V1ConfigMapVolumeSource(name=config_map_name,
+                                                    items=items)
+        volume_object = client.V1Volume(name=name,
+                                        config_map=config_map)
+    else:
+        volume_object = client.V1Volume(name=name,
+                                        empty_dir=client.V1EmptyDirVolumeSource())
+    return volume_object
+
+
+def create_pod_spec(spec: dict):
+    """
+    Create the spec.template.spec section of a kubernetes job.
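+    Containers and volumes are built with the helpers above; imagePullSecrets is attached
+    only when it is present in the parsed configuration.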
+
+    :param spec: Parsed yaml spec.template.spec configuration
+    :return: V1PodSpec
+    """
+    containers = [create_container(c) for c in spec["containers"]]
+    volumes = [create_volume(v) for v in spec["volumes"]]
+    restart_policy = spec["restartPolicy"]
+    secrets = None
+    if "imagePullSecrets" in spec:
+        secrets = []
+        for secret in spec["imagePullSecrets"]:
+            new_secret = client.V1LocalObjectReference(name=secret["name"])
+            secrets.append(new_secret)
+    if secrets is not None:
+        spec_object = client.V1PodSpec(containers=containers,
+                                       volumes=volumes,
+                                       restart_policy=restart_policy,
+                                       image_pull_secrets=secrets)
+    else:
+        spec_object = client.V1PodSpec(containers=containers,
+                                       volumes=volumes,
+                                       restart_policy=restart_policy)
+    return spec_object
+
+
+def create_pod_metadata(metadata: dict):
+    """
+    Create the spec.template.metadata section of a kubernetes job.
+
+    :param metadata: Parsed yaml spec.template.metadata configuration
+    :return: V1ObjectMeta
+    """
+    labels = {}
+    for key, value in metadata["labels"].items():
+        labels[key] = value
+    annotations = None
+    if "annotations" in metadata:
+        annotations = {}
+        for key, value in metadata["annotations"].items():
+            annotations[key] = value
+    if annotations is not None:
+        metadata_object = client.V1ObjectMeta(annotations=annotations, labels=labels)
+    else:
+        metadata_object = client.V1ObjectMeta(labels=labels)
+    return metadata_object
+
+
+def create_pod_template(pod_template: dict):
+    """
+    Create the spec.template section of a kubernetes job.
+
+    :param pod_template: Parsed yaml spec.template configuration
+    :return: V1PodTemplateSpec
+    """
+    metadata = create_pod_metadata(pod_template["metadata"])
+    spec = create_pod_spec(pod_template["spec"])
+    template_object = client.V1PodTemplateSpec(metadata=metadata,
+                                               spec=spec)
+    return template_object
+
+
+def create_job_spec(spec: dict):
+    """
+    Create the spec section of a kubernetes job.
+
+    :param spec: Parsed yaml job spec configuration
+    :return: V1JobSpec
+    """
+    ttl_seconds_after_finished = spec["ttlSecondsAfterFinished"]
+    template = create_pod_template(spec["template"])
+    job_spec_object = client.V1JobSpec(ttl_seconds_after_finished=ttl_seconds_after_finished,
+                                       template=template)
+    return job_spec_object
+
+
+def create_job_metadata(metadata: dict):
+    """
+    Create the metadata section of a kubernetes job.
+
+    :param metadata: Parsed yaml job metadata configuration
+    :return: V1ObjectMeta
+    """
+    name = metadata["name"]
+    labels = {}
+    for key, value in metadata["labels"].items():
+        labels[key] = value
+    metadata_object = client.V1ObjectMeta(name=name,
+                                          labels=labels)
+    return metadata_object
+
+
+def create_job_object(config_file: dict):
+    """
+    Create a job object based on the provided configuration file
+
+    :param config_file: Parsed yaml job configuration file
+    :return: V1Job
+    """
+    metadata = create_job_metadata(config_file["metadata"])
+    spec = create_job_spec(config_file["spec"])
+    job = client.V1Job(
+        api_version="batch/v1",
+        kind="Job",
+        metadata=metadata,
+        spec=spec)
+
+    return job
+
+def create_job(api_instance, job, namespace):
+    """
+    Create a new job in the kubernetes namespace
+    """
+    if api_instance is None or job is None:
+        logger.debug("Api instance and job must not be None")
+    else:
+        api_response = api_instance.create_namespaced_job(
+            body=job,
+            namespace=namespace)
+        logger.info(f"Job created. status='{str(api_response.status)}'")
\ No newline at end of file
diff --git a/backend/SC4SNMP_UI_backend/apply_changes/routes.py b/backend/SC4SNMP_UI_backend/apply_changes/routes.py
new file mode 100644
index 0000000..c6089f4
--- /dev/null
+++ b/backend/SC4SNMP_UI_backend/apply_changes/routes.py
@@ -0,0 +1,22 @@
+from flask import Blueprint, jsonify
+from flask_cors import cross_origin
+from SC4SNMP_UI_backend.apply_changes.apply_changes import ApplyChanges
+import os
+
+apply_changes_blueprint = Blueprint('common_blueprint', __name__)
+JOB_CREATION_RETRIES = int(os.getenv("JOB_CREATION_RETRIES", 10))
+
+@apply_changes_blueprint.route("/apply-changes", methods=['POST'])
+@cross_origin()
+def apply_changes():
+    changes = ApplyChanges()
+    job_delay, currently_scheduled = changes.apply_changes()
+    if job_delay <= 1 and currently_scheduled:
+        message = "There might be a previous kubernetes job still present in the namespace. The configuration update will be " \
+                  f"retried {JOB_CREATION_RETRIES} times. If your configuration isn't updated in a few minutes, make sure that " \
+                  f"the snmp-splunk-connect-for-snmp-inventory job isn't present in your kubernetes deployment namespace and " \
+                  f"click the 'Apply changes' button once again."
+    else:
+        message = f"Configuration will be updated in approximately {job_delay} seconds."
+    result = jsonify({"message": message})
+    return result, 200
\ No newline at end of file
diff --git a/backend/SC4SNMP_UI_backend/apply_changes/tasks.py b/backend/SC4SNMP_UI_backend/apply_changes/tasks.py
new file mode 100644
index 0000000..2e5bfed
--- /dev/null
+++ b/backend/SC4SNMP_UI_backend/apply_changes/tasks.py
@@ -0,0 +1,55 @@
+import time
+from celery import shared_task
+import datetime
+from kubernetes import client, config
+import yaml
+from kubernetes.client import ApiException
+from SC4SNMP_UI_backend.apply_changes.kubernetes_job import create_job_object, create_job
+from pymongo import MongoClient
+import os
+from celery.utils.log import get_task_logger
+
+MONGO_URI = os.getenv("MONGO_URI")
+JOB_NAMESPACE = os.getenv("JOB_NAMESPACE", "sc4snmp")
+JOB_CREATION_RETRIES = int(os.getenv("JOB_CREATION_RETRIES", 10))
+JOB_CONFIG_PATH = os.getenv("JOB_CONFIG_PATH", "/config/job_config.yaml")
+celery_logger = get_task_logger(__name__)
+
+@shared_task()
+def run_job():
+    job = None
+    batch_v1 = None
+    with open(JOB_CONFIG_PATH, encoding="utf-8") as file:
+        config_file = yaml.safe_load(file)
+        if config_file["apiVersion"] != "batch/v1":
+            raise ValueError("api version is different from batch/v1")
+        config.load_incluster_config()
+        batch_v1 = client.BatchV1Api()
+        job = create_job_object(config_file)
+
+    with MongoClient(MONGO_URI) as connection:
+        try_creating = True
+        iteration = 0
+        while try_creating and iteration < JOB_CREATION_RETRIES:
+            # Try creating a new job. If the previous job is still present in the namespace,
+            # ApiException will be raised. If that happens, wait 10 seconds and try creating the job again.
+            try:
+                create_job(batch_v1, job, JOB_NAMESPACE)
+                try_creating = False
+                try:
+                    record = list(connection.sc4snmp.config_collection.find())[0]
+                    connection.sc4snmp.config_collection.update_one({"_id": record["_id"]},
+                                                                    {"$set": {"previous_job_start_time": datetime.datetime.utcnow(),
+                                                                              "currently_scheduled": False}})
+                except Exception as e:
+                    celery_logger.info(f"Error occurred while updating job state after job creation: {str(e)}")
+            except ApiException:
+                iteration += 1
+                if iteration == JOB_CREATION_RETRIES:
+                    try_creating = False
+                    celery_logger.info(f"Kubernetes job was not created. Max retries ({JOB_CREATION_RETRIES}) exceeded.")
+                    record = list(connection.sc4snmp.config_collection.find())[0]
+                    connection.sc4snmp.config_collection.update_one({"_id": record["_id"]},
+                                                                    {"$set": {"currently_scheduled": False}})
+                else:
+                    time.sleep(10)
\ No newline at end of file
diff --git a/backend/SC4SNMP_UI_backend/common/backend_ui_conversions.py b/backend/SC4SNMP_UI_backend/common/backend_ui_conversions.py
new file mode 100644
index 0000000..620ad70
--- /dev/null
+++ b/backend/SC4SNMP_UI_backend/common/backend_ui_conversions.py
@@ -0,0 +1,308 @@
+from abc import abstractmethod
+
+
+def camel_case2snake_case(txt):
+    return ''.join(['_' + i.lower() if i.isupper()
+                    else i for i in txt]).lstrip('_')
+
+
+def snake_case2camel_case(txt):
+    result = []
+    to_upper = False
+    for i in range(len(txt)):
+        if txt[i] != "_":
+            result.append(txt[i].upper()) if to_upper else result.append(txt[i])
+            to_upper = False
+        elif txt[i] == "_" and i < len(txt) - 1:
+            to_upper = True
+
+    return ''.join(result)
+
+
+def get_group_or_profile_name_from_backend(document: dict):
+    group_or_profile_name = None
+    for key in document.keys():
+        if key != "_id":
+            group_or_profile_name = key
+    return group_or_profile_name
+
+
+class Conversion:
+
+    @abstractmethod
+    def backend2ui(self, document: dict, **kwargs):
+        pass
+
+    @abstractmethod
+    def ui2backend(self, document: dict, **kwargs):
+        pass
+
+
+def string_value_to_numeric(value: str):
+    try:
+        if value.isnumeric():
+            return int(value)
+        elif value.replace(".", "").isnumeric():
+            return float(value)
+        else:
+            return value
+    except ValueError:
+        return value
+
+
+class ProfileConversion(Conversion):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.__backend2ui_conditional_operations = {
+            "lt": "less than",
+            "gt": "greater than",
+            "equals": "equals",
+            "in": "in",
+            "regex": "regex"
+        }
+        self.__ui2backend_conditional_operations = {}
+        for key, value in self.__backend2ui_conditional_operations.items():
+            self.__ui2backend_conditional_operations[value] = key
+
+        self.__backend2ui_profile_types = {
+            "field": "smart",
+            "base": "base",
+            "walk": "walk"
+        }
+        self.__ui2backend_profile_types = {}
+        for key, value in self.__backend2ui_profile_types.items():
+            self.__ui2backend_profile_types[value] = key
+
+    def backend2ui(self, document: dict, **kwargs):
+        profile_name = get_group_or_profile_name_from_backend(document)
+        if "profile_in_inventory" not in kwargs.keys():
+            raise ValueError("No profile_in_inventory provided")
+        elif profile_name is None:
+            raise ValueError("No profile name detected")
+        else:
+            profile_in_inventory = kwargs["profile_in_inventory"]
+            backend_var_binds = document[profile_name]["varBinds"]
+            var_binds = []
+            for vb in backend_var_binds:
+                new_vb = {
+                    "component": vb[0],
+                    "object": vb[1] if len(vb) >= 2 else "",
+                    "index": '.'.join(map(str, vb[2:])) if len(vb) >= 3 else "",
+                }
+                var_binds.append(new_vb)
+
+            if "condition" in document[profile_name]:
+                backend_condition = document[profile_name]["condition"]
+                condition_type = self.__backend2ui_profile_types[backend_condition["type"]]
+                field = backend_condition["field"] if backend_condition["type"] == "field" else ""
+                patterns = [{"pattern": p} for p in backend_condition["patterns"]] \
+                    if backend_condition["type"] == "field" else []
+                conditions = {
+                    "condition": condition_type,
+                    "field": field,
+                    "patterns": patterns,
+                    "conditions": []
+                }
+            elif "conditions" in document[profile_name]:
+                conditional = []
+                for back_condition in document[profile_name]["conditions"]:
+                    field = back_condition["field"]
+                    operation = self.__backend2ui_conditional_operations[back_condition["operation"]]
+                    negate_operation = back_condition.get("negate_operation", False)
+                    value = []
+                    if operation == "in":
+                        for v in back_condition["value"]:
+                            value.append(str(v))
+                    else:
+                        value.append(str(back_condition["value"]))
+                    conditional.append(
+                        {"field": field, "operation": operation, "value": value, "negateOperation": negate_operation}
+                    )
+                conditions = {
+                    "condition": "conditional",
+                    "field": "",
+                    "patterns": [],
+                    "conditions": conditional
+                }
+            else:
+                conditions = {
+                    "condition": "standard",
+                    "field": "",
+                    "patterns": [],
+                    "conditions": []
+                }
+
+            result = {
+                "_id": str(document["_id"]),
+                "profileName": profile_name,
+                "frequency": document[profile_name].get("frequency", 1),
+                "conditions": conditions,
+                "varBinds": var_binds,
+                "profileInInventory": profile_in_inventory
+            }
+            return result
+
+    def ui2backend(self, document: dict, **kwargs):
+        conditions = None
+        condition = None
+        if document['conditions']['condition'] == "smart":
+            condition = {
+                'type': 'field',
+                'field': document['conditions']['field'],
+                'patterns': [el['pattern'] for el in document['conditions']['patterns']]
+            }
+        elif document['conditions']['condition'] == "conditional":
+            conditions = []
+            for ui_condition in document['conditions']['conditions']:
+                field = ui_condition["field"]
+                operation = self.__ui2backend_conditional_operations[ui_condition["operation"]]
+                if operation == "in":
+                    value = []
+                    for v in ui_condition["value"]:
+                        value.append(string_value_to_numeric(v))
+                else:
+                    value = string_value_to_numeric(ui_condition["value"][0])
+                if ui_condition["negateOperation"]:
+                    conditions.append(
+                        {"field": field, "operation": operation, "value": value, "negate_operation": True}
+                    )
+                else:
+                    conditions.append(
+                        {"field": field, "operation": operation, "value": value}
+                    )
+        elif document['conditions']['condition'] != "standard":
+            condition = {
+                'type': document['conditions']['condition']
+            }
+        var_binds = []
+        for var_b in document['varBinds']:
+            single_var_bind = [var_b['component']]
+            if len(var_b['object']) > 0:
+                single_var_bind.append(var_b['object'])
+            if len(var_b['index']) > 0:
+                single_var_bind += var_b['index'].split(".")
+            var_binds.append(single_var_bind)
+
+        item = {
+            document['profileName']: {
+                'varBinds': var_binds
+            }
+        }
+        if document['conditions']['condition'] != "walk":
+            item[document['profileName']].update({'frequency': int(document['frequency'])})
+        if condition is not None:
+            item[document['profileName']].update({'condition': condition})
+        if conditions is not None:
+            item[document['profileName']].update({'conditions': conditions})
+        return item
+
+
+class GroupConversion(Conversion):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def backend2ui(self, document: dict, **kwargs):
+        if "group_in_inventory" in kwargs.keys():
+            group_name = get_group_or_profile_name_from_backend(document)
+            result = {
+                "_id": str(document["_id"]),
+                "groupName": group_name,
+                "groupInInventory": kwargs["group_in_inventory"]
+            }
+            return result
+        else:
+            raise ValueError("No group_in_inventory provided")
+
+    def ui2backend(self, document: dict, **kwargs):
+        result = {
+            document["groupName"]: []
+        }
+        return result
+
+
+class GroupDeviceConversion(Conversion):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.optional_fields = ["port", "version", "community", "secret", "security_engine"]
+
+    def backend2ui(self, document: dict, **kwargs):
+        if "group_id" in kwargs.keys() and "device_id" in kwargs.keys():
+            group_id = kwargs["group_id"]
+            device_id = kwargs["device_id"]
+            result = {
+                "_id": f"{group_id}-{device_id}",
+                "groupId": str(group_id),
+                "address": document['address']
+            }
+            for backend_key in self.optional_fields:
+                ui_key = snake_case2camel_case(backend_key)
+                if backend_key in document.keys():
+                    result.update({f'{ui_key}': str(document[backend_key])})
+                else:
+                    result.update({f'{ui_key}': ""})
+            return result
+        else:
+            raise ValueError("No group_id or device_id provided")
+
+    def ui2backend(self, document: dict, **kwargs):
+        result = {
+            "address": document["address"]
+        }
+        for backend_key in self.optional_fields:
+            ui_key = snake_case2camel_case(backend_key)
+            if len(document[ui_key]) > 0:
+                result.update({f"{backend_key}": str(document[ui_key])})
+        if len(document['port']) > 0:
+            result.update({"port": int(document['port'])})
+        return result
+
+
+class InventoryConversion(Conversion):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def ui2backend(self, document: dict, **kwargs):
+        if "delete" in kwargs.keys():
+            profiles = ""
+            for i in range(len(document['profiles'])):
+                profiles += f"{document['profiles'][i]}"
+                if i < len(document['profiles'])-1:
+                    profiles += ";"
+            result = {
+                'address': document['address'],
+                'port': int(document['port']),
+                'version': document['version'],
+                'community': document['community'],
+                'secret': document['secret'],
+                'security_engine': document['securityEngine'],
+                'walk_interval': document['walkInterval'],
+                'profiles': profiles,
+                'smart_profiles': document['smartProfiles'],
+                'delete': kwargs['delete']
+            }
+            return result
+        else:
+            raise ValueError("No delete provided")
+
+    def backend2ui(self, document: dict, **kwargs):
+        if "inventory_type" not in kwargs.keys():
+            raise ValueError("No inventory_type provided")
+        profiles_mongo = document['profiles']
+        if len(profiles_mongo) > 0:
+            profiles = profiles_mongo.split(";")
+        else:
+            profiles = []
+        result = {
+            '_id': str(document["_id"]),
+            'inventoryType': kwargs['inventory_type'],
+            'address': document['address'],
+            'port': str(document['port']),
+            'version': document['version'],
+            'community': document['community'],
+            'secret': document['secret'],
+            'securityEngine': document['security_engine'],
+            'walkInterval': document['walk_interval'],
+            'profiles': profiles,
+            'smartProfiles': document['smart_profiles']
+        }
+        return result
\ No newline at end of file
diff --git a/backend/SC4SNMP_UI_backend/common/conversions.py b/backend/SC4SNMP_UI_backend/common/conversions.py
deleted file mode 100644
index d17dc13..0000000
--- a/backend/SC4SNMP_UI_backend/common/conversions.py
+++ /dev/null
@@ -1,226 +0,0 @@
-from abc import abstractmethod
-
-
-def camel_case2snake_case(txt):
-    return ''.join(['_' + i.lower() if i.isupper()
-                    else i for i in txt]).lstrip('_')
-
-
-def snake_case2camel_case(txt):
-    result = []
-    to_upper = False
-    for i in range(len(txt)):
-        if txt[i] != "_":
-            result.append(txt[i].upper()) if to_upper else result.append(txt[i])
-            to_upper = False
-        elif txt[i] == "_" and i < len(txt) - 1:
-            to_upper = True
-            continue
-
-    return ''.join(result)
-
-
-def get_group_name_from_backend(document: dict):
-    group_name = None
-    for key in document.keys():
-        if key != "_id":
-            group_name = key
-    return group_name
-
-
-class Conversion:
-    @abstractmethod
-    def _ui2backend_map(self, document: dict, **kwargs):
-        pass
-
-    @abstractmethod
-    def _backend2ui_map(self, document: dict, **kwargs):
-        pass
-
-    def backend2ui(self, document: dict, **kwargs):
-        return self._backend2ui_map(document, **kwargs)
-
-    def ui2backend(self, document: dict, **kwargs):
-        return self._ui2backend_map(document, **kwargs)
-
-
-class ProfileConversion(Conversion):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-    def _backend2ui_map(self, document: dict, **kwargs):
-        profile_name = None
-        for key in document.keys():
-            if key != "_id":
-                profile_name = key
-        if profile_name is None:
-            raise ValueError("No profile name detected")
-        else:
-            backend_var_binds = document[profile_name]["varBinds"]
-            var_binds = []
-            for vb in backend_var_binds:
-                new_vb = {
-                    "family": vb[0],
-                    "category": vb[1] if len(vb) >= 2 else "",
-                    "index": str(vb[2]) if len(vb) == 3 else "",
-                }
-                var_binds.append(new_vb)
-
-            if "condition" in document[profile_name]:
-                backend_condition = document[profile_name]["condition"]
-                condition_type = backend_condition["type"]
-                field = backend_condition["field"] if condition_type == "field" else ""
-                patterns = [{"pattern": p} for p in backend_condition["patterns"]] \
-                    if condition_type == "field" else None
-                conditions = {
-                    "condition": condition_type,
-                    "field": field,
-                    "patterns": patterns
-                }
-            else:
-                conditions = {
-                    "condition": "None",
-                    "field": "",
-                    "patterns": None
-                }
-            result = {
-                "_id": str(document["_id"]),
-                "profileName": profile_name,
-                "frequency": document[profile_name]["frequency"],
-                "conditions": conditions,
-                "varBinds": var_binds
-            }
-            return result
-
-    def _ui2backend_map(self, document: dict, **kwargs):
-        if document['conditions']['condition'] == "field":
-            conditions = {
-                'type': 'field',
-                'field': document['conditions']['field'],
-                'patterns': [el['pattern'] for el in document['conditions']['patterns']]
-            }
-        elif document['conditions']['condition'] == "None":
-            conditions = None
-        else:
-            conditions = {
-                'type': document['conditions']['condition']
-            }
-        var_binds = []
-        for var_b in document['varBinds']:
-            single_var_bind = [var_b['family']]
-            if len(var_b['category']) > 0:
-                single_var_bind.append(var_b['category'])
-            if len(var_b['index']) > 0:
-                single_var_bind.append(int(var_b['index']))
-            var_binds.append(single_var_bind)
-
-        item = {
-            document['profileName']: {
-                'frequency': int(document['frequency']),
-                'varBinds': var_binds
-            }
-        }
-        if conditions is not None:
-            item[document['profileName']].update({'condition': conditions})
-        return item
-
-
-class GroupConversion(Conversion):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-    def _backend2ui_map(self, document: dict, **kwargs):
-        group_name = get_group_name_from_backend(document)
-        result = {
-            "_id": str(document["_id"]),
-            "groupName": group_name
-        }
-        return result
-
-    def _ui2backend_map(self, document: dict, **kwargs):
-        result = {
-            document["groupName"]: []
-        }
-        return result
-
-
-class GroupDeviceConversion(Conversion):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.optional_fields = ["port", "version", "community", "secret", "security_engine"]
-
-    def _backend2ui_map(self, document: dict, **kwargs):
-        if "group_id" in kwargs.keys() and "device_id" in kwargs.keys():
-            group_id = kwargs["group_id"]
-            device_id = kwargs["device_id"]
-            result = {
-                "_id": f"{group_id}-{device_id}",
-                "groupId": str(group_id),
-                "address": document['address']
-            }
-            for backend_key in self.optional_fields:
-                ui_key = snake_case2camel_case(backend_key)
-                if backend_key in document.keys():
-                    result.update({f'{ui_key}': str(document[backend_key])})
-                else:
-                    result.update({f'{ui_key}': ""})
-            return result
-        else:
-            raise ValueError("No group_id or device_id provided")
-
-    def _ui2backend_map(self, document: dict, **kwargs):
-        result = {
-            "address": document["address"]
-        }
-        for backend_key in self.optional_fields:
-            ui_key = snake_case2camel_case(backend_key)
-            if len(document[ui_key]) > 0:
-                result.update({f"{backend_key}": str(document[ui_key])})
-        if len(document['port']) > 0:
-            result.update({"port": int(document['port'])})
-        return result
-
-
-class InventoryConversion(Conversion):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-    def _ui2backend_map(self, document: dict, **kwargs):
-        if "delete" in kwargs.keys():
-            profiles = ""
-            for i in range(len(document['profiles'])):
-                profiles += f"{document['profiles'][i]}"
-                if i < len(document['profiles'])-1:
-                    profiles += ";"
-            result = {
-                'address': document['address'],
-                'port': int(document['port']),
-                'version': document['version'],
-                'community': document['community'],
-                'secret': document['secret'],
-                'security_engine': document['securityEngine'],
-                'walk_interval': document['walkInterval'],
-                'profiles': profiles,
-                'smart_profiles': document['smartProfiles'],
-                'delete': kwargs['delete']
-            }
-            return result
-        else:
-            raise ValueError("No delete provided")
-
-    def _backend2ui_map(self, document: dict, **kwargs):
-        profiles_mongo = document['profiles']
-        profiles = profiles_mongo.split(";")
-        result = {
-            '_id': str(document["_id"]),
-            'address': document['address'],
-            'port': str(document['port']),
-            'version': document['version'],
-            'community': document['community'],
-            'secret': document['secret'],
-            'securityEngine': document['security_engine'],
-            'walkInterval': document['walk_interval'],
-            'profiles': profiles,
-            'smartProfiles': document['smart_profiles']
-        }
-        return result
diff --git a/backend/SC4SNMP_UI_backend/common/inventory_utils.py b/backend/SC4SNMP_UI_backend/common/inventory_utils.py
new file mode 100644
index 0000000..853a234
--- /dev/null
+++ b/backend/SC4SNMP_UI_backend/common/inventory_utils.py
@@ -0,0 +1,266 @@
+from SC4SNMP_UI_backend import mongo_client
+from enum import Enum
+from typing import Callable
+from bson import ObjectId
+from SC4SNMP_UI_backend.common.backend_ui_conversions import InventoryConversion
+
+mongo_groups = mongo_client.sc4snmp.groups_ui
+mongo_inventory = mongo_client.sc4snmp.inventory_ui
+inventory_conversion = InventoryConversion()
+
+class HostConfiguration(Enum):
+    SINGLE = 1
+    GROUP = 2
+
+def get_inventory_type(document):
+    if list(mongo_groups.find({document["address"]: {"$exists": 1}})):
+        result = "Group"
+    else:
+        result = "Host"
+    return result
+
+def update_profiles_in_inventory(profile_to_search: str, process_record: Callable, **kwargs):
+    """
+    When profile
is edited, inventory records using this profile should in some cases be updated as well. + + :param profile_to_search: name of the profile which should be updated in the inventory + :param process_record: function to process profiles in a record. It should accept the index of the profile to update, + the whole record dictionary, and kwargs passed by the user. + :param kwargs: additional variables which the user can pass to the process_record function + :return: + """ + inventory_records = list(mongo_inventory.find({"profiles": {"$regex": f'.*{profile_to_search}.*'}, "delete": False})) + for record in inventory_records: + record_id = record["_id"] + record_updated = inventory_conversion.backend2ui(record, inventory_type=None) # inventory_type isn't used + index_to_update = record_updated["profiles"].index(profile_to_search) + record_updated = process_record(index_to_update, record_updated, kwargs) + record_updated = inventory_conversion.ui2backend(record_updated, delete=False) + mongo_inventory.update_one({"_id": ObjectId(record_id)}, {"$set": record_updated}) + return inventory_records + + +class HandleNewDevice: + def __init__(self, mongo_groups, mongo_inventory): + self._mongo_groups = mongo_groups + self._mongo_inventory = mongo_inventory + + def _is_host_in_group(self, address, port) -> (bool, str, str, str): + groups_from_inventory = list(self._mongo_inventory.find({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False})) + break_occurred = False + + host_in_group = False + group_id = None + device_id = None + group_name = None + + for group_config in groups_from_inventory: + group_config_name = group_config["address"] + group_name = group_config_name + group_port = group_config["port"] + group = list(self._mongo_groups.find({group_config_name: {"$exists": 1}})) + if len(group) > 0: + group = group[0] + for i, device in enumerate(group[group_config_name]): + device_port = device.get("port", group_port) + if device["address"] == address and int(device_port) == int(port): + host_in_group = True + group_id = str(group["_id"]) + device_id = i + break_occurred = True + break + if break_occurred: + break + + return host_in_group, group_id, device_id, group_name + + def _is_host_configured(self, address: str, port: str): + existing_inventory_record = list(self._mongo_inventory.find({'address': address, 'port': int(port), "delete": False})) + deleted_inventory_record = list(self._mongo_inventory.find({'address': address, 'port': int(port), "delete": True})) + + host_configured = False + host_configuration = None + existing_id_string = None + group_name = None + + if len(existing_inventory_record) > 0: + host_configured = True + host_configuration = HostConfiguration.SINGLE + existing_id_string = str(existing_inventory_record[0]["_id"]) + else: + host_in_group, group_id, device_id, group_name = self._is_host_in_group(address, port) + if host_in_group: + host_configured = True + host_configuration = HostConfiguration.GROUP + existing_id_string = f"{group_id}-{device_id}" + + return host_configured, deleted_inventory_record, host_configuration, existing_id_string, group_name + + def add_single_host(self, address, port, device_object=None, add: bool=True): + host_configured, deleted_inventory_record, host_configuration, existing_id_string, group_name = \ + self._is_host_configured(address, port) + groups = list(mongo_groups.find({address: {"$exists": True}})) + if host_configured: + host_location_message = "in the inventory" if host_configuration == HostConfiguration.SINGLE else \ + f"in group {group_name}" + message = 
f"Host {address}:{port} already exists {host_location_message}. Record was not added." + host_added = False + elif groups: + message = f"There is a group with the same name configured. Record {address} can't be added as a single host." + host_added = False + else: + if add and device_object is not None: + self._mongo_inventory.insert_one(device_object) + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + message = None + host_added = True + return host_added, message + + def edit_single_host(self, address: str, port: str, host_id: str, device_object=None, edit: bool=True): + host_configured, deleted_inventory_record, host_configuration, existing_id_string, group_name =\ + self._is_host_configured(address, port) + + if not host_configured or (host_configured and host_id == existing_id_string): + message = "success" + host_edited = True + if edit and device_object is not None: + host_id = ObjectId(host_id) + previous_device_object = list(self._mongo_inventory.find({"_id": host_id}))[0] + if int(port) != int(previous_device_object["port"]) or address != previous_device_object["address"]: + host_added, add_message = self.add_single_host(address, port, device_object, True) + if not host_added: + host_edited = False + message = add_message + else: + self._mongo_inventory.update_one({"_id": ObjectId(host_id)}, {"$set": {"delete": True}}) + message = "Address or port was edited which resulted in deleting the old device and creating " \ + "the new one at the end of the list." + else: + self._mongo_inventory.update_one({"_id": host_id}, {"$set": device_object}) + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + else: + host_location_message = "in the inventory" if host_configuration == HostConfiguration.SINGLE else \ + f"in group {group_name}" + message = f"Host {address}:{port} already exists {host_location_message}. Record was not edited." + host_edited = False + return host_edited, message + + def add_group_host(self, group_name: str, group_id: ObjectId, device_object: dict): + group_from_inventory = list(self._mongo_inventory.find({"address": group_name, "delete": False})) + group = list(self._mongo_groups.find({"_id": group_id}, {"_id": 0})) + group = group[0] + address = device_object["address"] + port = str(device_object.get("port", "")) + if len(group_from_inventory) > 0: + device_port = port if len(port)>0 else str(group_from_inventory[0]["port"]) + host_added, message = self.add_single_host(address, device_port, add=False) + else: + new_device_port = int(port) if len(port) > 0 else -1 + host_added = True + message = None + for device in group[group_name]: + old_device_port = device.get('port', -1) + if device["address"] == address and old_device_port == new_device_port: + message = f"Host {address}:{port} already exists in group {group_name}. Record was not added." 
+ host_added = False + if host_added: + group[group_name].append(device_object) + new_values = {"$set": group} + self._mongo_groups.update_one({"_id": group_id}, new_values) + return host_added, message + + def edit_group_host(self, group_name: str, group_id: ObjectId, device_id: str, device_object: dict): + group_from_inventory = list(self._mongo_inventory.find({"address": group_name, "delete": False})) + group = list(self._mongo_groups.find({"_id": group_id})) + group = group[0] + address = device_object["address"] + port = str(device_object.get("port", "")) + if len(group_from_inventory) > 0: + device_port = port if len(port) > 0 else str(group_from_inventory[0]["port"]) + host_edited, message = self.edit_single_host(address, device_port, device_id, edit=False) + else: + new_device_port = int(port) if len(port) > 0 else -1 + host_edited = True + message = None + for i, device in enumerate(group[group_name]): + old_device_port = device.get('port', -1) + old_device_id = f"{i}" + if device["address"] == address and old_device_port == new_device_port and old_device_id != device_id: + message = f"Host {address}:{port} already exists in group {group_name}. Record was not edited." + host_edited = False + if host_edited: + group[group_name][int(device_id)] = device_object + new_values = {"$set": group} + mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) + return host_edited, message + + def add_group_to_inventory(self, group_name: str, group_port: str, group_object=None, add: bool = True): + group_added = True + message = None + existing_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": False})) + deleted_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": True})) + group = list(self._mongo_groups.find({group_name: {"$exists": 1}})) + if len(group) == 0: + group_added = False + message = f"Group {group_name} doesn't exist in the configuration. Record was not added." + elif len(existing_inventory_record) > 0: + group_added = False + message = f"Group {group_name} has already been added to the inventory. Record was not added." + else: + group = group[0] + devices_in_group = dict() + for i, device in enumerate(group[group_name]): + device_port = str(device.get("port", group_port)) + address = device["address"] + device_added, message = self.add_single_host(address, device_port, add=False) + if not device_added: + group_added = False + message = f"Can't add group {group_name}. {message}" + break + else: + if f"{address}:{device_port}" in devices_in_group: + message = f"Can't add group {group_name}. Device {address}:{device_port} was configured multiple times in this group. Record was not added." 
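Every public method of HandleNewDevice returns a (succeeded, message) pair, and passing add=False (or edit=False) with no device object turns the call into a pure duplicate check. A rough usage sketch, assuming the module-level collections wired up above; the address and port are made up:

```python
# Dry-run duplicate check: with add=False and no device_object,
# add_single_host only validates and reports, writing nothing to Mongo.
handler = HandleNewDevice(mongo_groups, mongo_inventory)
host_added, message = handler.add_single_host("10.0.0.5", "161", add=False)
if not host_added:
    # e.g. "Host 10.0.0.5:161 already exists in the inventory. Record was not added."
    print(message)
```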
+ + group_added = False + break + else: + devices_in_group[f"{address}:{device_port}"] = 1 + + if group_added and add and group_object is not None: + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + self._mongo_inventory.insert_one(group_object) + return group_added, message + + def edit_group_in_inventory(self, group_name: str, group_id: str, group_object=None, edit: bool = True): + group_id = ObjectId(group_id) + existing_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": False})) + deleted_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": True})) + group = list(self._mongo_groups.find({group_name: {"$exists": 1}})) + if len(group) == 0: + group_edited = False + message = f"Group {group_name} doesn't exist in the configuration. Record was not edited." + elif len(existing_inventory_record) == 0 or (len(existing_inventory_record) > 0 and existing_inventory_record[0]["_id"] == group_id): + message = "success" + group_edited = True + if edit and group_object is not None: + previous_group_object = list(self._mongo_inventory.find({"_id": group_id}))[0] + if group_name != previous_group_object["address"]: + group_added, add_message = self.add_group_to_inventory(group_name, str(group_object["port"]), group_object, True) + if not group_added: + group_edited = False + message = add_message + else: + self._mongo_inventory.update_one({"_id": ObjectId(group_id)}, {"$set": {"delete": True}}) + message = "Group name was edited which resulted in deleting the old group and creating a new " \ + "one at the end of the list." + else: + self._mongo_inventory.update_one({"_id": group_id}, {"$set": group_object}) + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + else: + message = f"Group with name {group_name} already exists. Record was not edited." 
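The duplicate detection in add_group_to_inventory above keys each device by an address:port string, falling back to the group-level port when a device has no port of its own. The same idea in isolation, with made-up device dicts:

```python
# Sketch of the address:port dedup key used when validating a group.
group_port = 161  # group-level default port (illustrative value)
devices = [{"address": "10.0.0.5"}, {"address": "10.0.0.5", "port": 161}]

devices_in_group = {}
for device in devices:
    key = f'{device["address"]}:{device.get("port", group_port)}'
    if key in devices_in_group:
        print(f"Device {key} was configured multiple times in this group.")
        break
    devices_in_group[key] = 1
```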
+ + group_edited = False + + return group_edited, message diff --git a/backend/SC4SNMP_UI_backend/groups/__init__.py b/backend/SC4SNMP_UI_backend/groups/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/SC4SNMP_UI_backend/groups/routes.py b/backend/SC4SNMP_UI_backend/groups/routes.py new file mode 100644 index 0000000..cf27d5d --- /dev/null +++ b/backend/SC4SNMP_UI_backend/groups/routes.py @@ -0,0 +1,175 @@ +from bson import ObjectId +from flask import request, Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.common.backend_ui_conversions import GroupConversion, GroupDeviceConversion, InventoryConversion, \ + get_group_or_profile_name_from_backend +from copy import copy +from SC4SNMP_UI_backend.common.inventory_utils import HandleNewDevice, get_inventory_type + +groups_blueprint = Blueprint('groups_blueprint', __name__) + +group_conversion = GroupConversion() +group_device_conversion = GroupDeviceConversion() +inventory_conversion = InventoryConversion() +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui + +@groups_blueprint.route('/groups') +@cross_origin() +def get_groups_list(): + groups = mongo_groups.find() + groups_list = [] + for gr in list(groups): + group_name = get_group_or_profile_name_from_backend(gr) + group_in_inventory = True if list(mongo_inventory.find({"address": group_name, "delete": False})) else False + groups_list.append(group_conversion.backend2ui(gr, group_in_inventory=group_in_inventory)) + return jsonify(groups_list) + + +@groups_blueprint.route('/groups/add', methods=['POST']) +@cross_origin() +def add_group_record(): + group_obj = request.json + same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) + if len(same_name_groups) > 0: + result = jsonify( + {"message": f"Group with name {group_obj['groupName']} already exists. Group was not added."}), 400 + elif list(mongo_inventory.find({"address": group_obj['groupName'], "delete": False})): + result = jsonify( + {"message": f"In the inventory there is a record with name {group_obj['groupName']}. Group was not added."} + ), 400 + else: + group_obj = group_conversion.ui2backend(group_obj) + mongo_groups.insert_one(group_obj) + result = jsonify("success") + return result + + +@groups_blueprint.route('/groups/update/<group_id>', methods=['POST']) +@cross_origin() +def update_group(group_id): + group_obj = request.json + same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) + if len(same_name_groups) > 0: + result = jsonify( + {"message": f"Group with name {group_obj['groupName']} already exists. Group was not edited."}), 400 + elif list(mongo_inventory.find({"address": group_obj['groupName'], "delete": False})): + result = jsonify( + {"message": f"In the inventory there is a record with name {group_obj['groupName']}. 
Group was not edited."} + ), 400 + else: + old_group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] + old_group_name = get_group_or_profile_name_from_backend(old_group) + mongo_groups.update_one({'_id': old_group['_id']}, {"$rename": {f"{old_group_name}": f"{group_obj['groupName']}"}}) + + # Rename corresponding group in the inventory + mongo_inventory.update_one({"address": old_group_name}, {"$set": {"address": group_obj['groupName']}}) + result = jsonify({"message": f"{old_group_name} was also renamed to {group_obj['groupName']} in the inventory"}), 200 + return result + + +@groups_blueprint.route('/groups/delete/', methods=['POST']) +@cross_origin() +def delete_group_and_devices(group_id): + group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] + group_name = get_group_or_profile_name_from_backend(group) + configured_in_inventory = False + with mongo_client.start_session() as session: + with session.start_transaction(): + mongo_groups.delete_one({'_id': ObjectId(group_id)}) + if list(mongo_inventory.find({"address": group_name})): + configured_in_inventory = True + mongo_inventory.update_one({"address": group_name}, {"$set": {"delete": True}}) + if configured_in_inventory: + message = f"Group {group_name} was deleted. It was also deleted from the inventory." + else: + message = f"Group {group_name} was deleted." + return jsonify({"message": message}), 200 + + +@groups_blueprint.route('/group//devices/count') +@cross_origin() +def get_devices_count_for_group(group_id): + group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] + group_name = get_group_or_profile_name_from_backend(group) + total_count = len(group[group_name]) + return jsonify(total_count) + + +@groups_blueprint.route('/group//devices//') +@cross_origin() +def get_devices_of_group(group_id, page_num, dev_per_page): + page_num = int(page_num) + dev_per_page = int(dev_per_page) + skips = dev_per_page * (page_num - 1) + group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] + + group_name = get_group_or_profile_name_from_backend(group) + devices_list = [] + for i, device in enumerate(group[group_name]): + devices_list.append(group_device_conversion.backend2ui(device, group_id=group_id, device_id=copy(i))) + devices_list = devices_list[skips:skips+dev_per_page] + return jsonify(devices_list) + + +@groups_blueprint.route('/group/inventory/') +@cross_origin() +def get_group_config_from_inventory(group_name): + group_from_inventory = list(mongo_inventory.find({"address": group_name, "delete": False})) + if len(group_from_inventory) > 0: + inventory_type = get_inventory_type(group_from_inventory[0]) + result = jsonify(inventory_conversion.backend2ui(group_from_inventory[0], inventory_type=inventory_type)), 200 + else: + result = "", 204 + return result + + +@groups_blueprint.route('/devices/add', methods=['POST']) +@cross_origin() +def add_device_to_group(): + device_obj = request.json + group_id = device_obj["groupId"] + group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] + group_name = get_group_or_profile_name_from_backend(group) + device_obj = group_device_conversion.ui2backend(device_obj) + handler = HandleNewDevice(mongo_groups, mongo_inventory) + host_added, message = handler.add_group_host(group_name, ObjectId(group_id), device_obj) + if host_added: + result = jsonify("success"), 200 + else: + result = jsonify({"message": message}), 400 + return result + + +@groups_blueprint.route('/devices/update/', methods=['POST']) +@cross_origin() +def 
update_device_from_group(device_id): + device_obj = request.json + group_id = device_id.split("-")[0] + device_id = device_id.split("-")[1] + group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] + device_obj = group_device_conversion.ui2backend(device_obj) + group_name = get_group_or_profile_name_from_backend(group) + handler = HandleNewDevice(mongo_groups, mongo_inventory) + + host_edited, message = handler.edit_group_host(group_name, ObjectId(group_id), device_id, device_obj) + if host_edited: + result = jsonify("success"), 200 + else: + result = jsonify({"message": message}), 400 + return result + + +@groups_blueprint.route('/devices/delete/<device_id>', methods=['POST']) +@cross_origin() +def delete_device_from_group_record(device_id: str): + group_id = device_id.split("-")[0] + device_id = device_id.split("-")[1] + group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] + group_name = get_group_or_profile_name_from_backend(group) + removed_device = group[group_name].pop(int(device_id)) + device_name = f"{removed_device['address']}:{removed_device.get('port','')}" + new_values = {"$set": group} + mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) + return jsonify({"message": f"Device {device_name} from group {group_name} was deleted."}), 200 \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/inventory/__init__.py b/backend/SC4SNMP_UI_backend/inventory/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/SC4SNMP_UI_backend/inventory/routes.py b/backend/SC4SNMP_UI_backend/inventory/routes.py new file mode 100644 index 0000000..6966019 --- /dev/null +++ b/backend/SC4SNMP_UI_backend/inventory/routes.py @@ -0,0 +1,93 @@ +from bson import ObjectId +from flask import request, Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.common.backend_ui_conversions import InventoryConversion +from SC4SNMP_UI_backend.common.inventory_utils import HandleNewDevice, get_inventory_type + +inventory_blueprint = Blueprint('inventory_blueprint', __name__) + +inventory_conversion = InventoryConversion() +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui + +@inventory_blueprint.route('/inventory/<page_num>/<dev_per_page>') +@cross_origin() +def get_inventory_list(page_num, dev_per_page): + page_num = int(page_num) + dev_per_page = int(dev_per_page) + skips = dev_per_page * (page_num - 1) + + inventory = list(mongo_inventory.find({"delete": False}).skip(skips).limit(dev_per_page)) + inventory_list = [] + for inv in inventory: + inventory_type = get_inventory_type(inv) + inventory_list.append(inventory_conversion.backend2ui(inv, inventory_type=inventory_type)) + return jsonify(inventory_list) + + +@inventory_blueprint.route('/inventory/count') +@cross_origin() +def get_inventory_count(): + total_count = mongo_inventory.count_documents({"delete": False}) + return jsonify(total_count) + + +@inventory_blueprint.route('/inventory/add', methods=['POST']) +@cross_origin() +def add_inventory_record(): + inventory_obj = request.json + inventory_type = inventory_obj["inventoryType"] + inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) + handler = HandleNewDevice(mongo_groups, mongo_inventory) + if inventory_type == "Host": + record_added, message = handler.add_single_host(inventory_obj["address"], str(inventory_obj["port"]), + inventory_obj, True) + else: + record_added, message = 
handler.add_group_to_inventory(inventory_obj["address"], str(inventory_obj["port"]), + inventory_obj, True) + if record_added and message is not None: + result = jsonify({"message": message}), 200 + elif record_added: + result = jsonify("success"), 200 + else: + result = jsonify({"message": message}), 400 + return result + + +@inventory_blueprint.route('/inventory/delete/<inventory_id>', methods=['POST']) +@cross_origin() +def delete_inventory_record(inventory_id): + mongo_inventory.update_one({"_id": ObjectId(inventory_id)}, {"$set": {"delete": True}}) + inventory_item = list(mongo_inventory.find({"_id": ObjectId(inventory_id)}))[0] + address = inventory_item['address'] + port = f":{inventory_item['port']}" if address[0].isnumeric() else "" + return jsonify({"message": f"{address}{port} was deleted."}), 200 + + +@inventory_blueprint.route('/inventory/update/<inventory_id>', methods=['POST']) +@cross_origin() +def update_inventory_record(inventory_id): + inventory_obj = request.json + inventory_type = inventory_obj["inventoryType"] + inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) + current_inventory = list(mongo_inventory.find({"_id": ObjectId(inventory_id)}))[0] + current_inventory_type = get_inventory_type(current_inventory) + handler = HandleNewDevice(mongo_groups, mongo_inventory) + + if inventory_type != current_inventory_type: + result = jsonify({"message": "Can't change a single host into a group or a group into a single host"}), 400 + else: + if inventory_type == "Host": + record_edited, message = handler.edit_single_host(inventory_obj["address"], str(inventory_obj["port"]), + str(inventory_id), inventory_obj, True) + else: + record_edited, message = handler.edit_group_in_inventory(inventory_obj["address"], str(inventory_id), inventory_obj, True) + if record_edited: + if message == "success" or message is None: + result = jsonify("success"), 200 + else: + result = jsonify({"message": message}), 200 + else: + result = jsonify({"message": message}), 400 + return result diff --git a/backend/SC4SNMP_UI_backend/profiles/__init__.py b/backend/SC4SNMP_UI_backend/profiles/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/SC4SNMP_UI_backend/profiles/routes.py b/backend/SC4SNMP_UI_backend/profiles/routes.py new file mode 100644 index 0000000..cdc0e4c --- /dev/null +++ b/backend/SC4SNMP_UI_backend/profiles/routes.py @@ -0,0 +1,130 @@ +from bson import ObjectId +from flask import request, Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.common.backend_ui_conversions import ProfileConversion, get_group_or_profile_name_from_backend +from SC4SNMP_UI_backend.common.inventory_utils import update_profiles_in_inventory + +profiles_blueprint = Blueprint('profiles_blueprint', __name__) + +profile_conversion = ProfileConversion() +mongo_profiles = mongo_client.sc4snmp.profiles_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui + +# @cross_origin(origins='*', headers=['access-control-allow-origin', 'Content-Type']) +@profiles_blueprint.route('/profiles/names') +@cross_origin() +def get_profile_names(): + profiles = list(mongo_profiles.find()) + profiles_list = [] + for pr in profiles: + converted = profile_conversion.backend2ui(pr, profile_in_inventory=True) + if converted['conditions']['condition'] not in ['mandatory', 'base']: + profiles_list.append(converted) + return jsonify([el["profileName"] for el in profiles_list]) + +@profiles_blueprint.route('/profiles/count') +@cross_origin() +def 
get_profiles_count(): + total_count = mongo_profiles.count_documents({}) + return jsonify(total_count) + +@profiles_blueprint.route('/profiles/<page_num>/<prof_per_page>') +@cross_origin() +def get_profiles_list(page_num, prof_per_page): + page_num = int(page_num) + prof_per_page = int(prof_per_page) + skips = prof_per_page * (page_num - 1) + + profiles = list(mongo_profiles.find().skip(skips).limit(prof_per_page)) + profiles_list = [] + for pr in profiles: + profile_name = get_group_or_profile_name_from_backend(pr) + profile_in_inventory = True if list(mongo_inventory.find({"profiles": {"$regex": f'.*{profile_name}.*'}, + "delete": False})) else False + converted = profile_conversion.backend2ui(pr, profile_in_inventory=profile_in_inventory) + if converted['conditions']['condition'] not in ['mandatory']: + profiles_list.append(converted) + return jsonify(profiles_list) + + +@profiles_blueprint.route('/profiles') +@cross_origin() +def get_all_profiles_list(): + profiles = list(mongo_profiles.find()) + profiles_list = [] + for pr in profiles: + converted = profile_conversion.backend2ui(pr, profile_in_inventory=True) + if converted['conditions']['condition'] not in ['mandatory']: + profiles_list.append(converted) + return jsonify(profiles_list) + + +@profiles_blueprint.route('/profiles/add', methods=['POST']) +@cross_origin() +def add_profile_record(): + profile_obj = request.json + same_name_profiles = list(mongo_profiles.find({f"{profile_obj['profileName']}": {"$exists": True}})) + if len(same_name_profiles) > 0: + result = jsonify( + {"message": f"Profile with name {profile_obj['profileName']} already exists. Profile was not added."}), 400 + else: + profile_obj = profile_conversion.ui2backend(profile_obj) + mongo_profiles.insert_one(profile_obj) + result = jsonify("success") + return result + +@profiles_blueprint.route('/profiles/delete/<profile_id>', methods=['POST']) +@cross_origin() +def delete_profile_record(profile_id): + profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] + profile_name = list(profile.keys())[0] + + # Find records from inventory where this profile was used. + def delete_profile(index, record_to_update, kwargs): + record_to_update["profiles"].pop(index) + return record_to_update + inventory_records = update_profiles_in_inventory(profile_name, delete_profile) + if inventory_records: + message = f"Profile {profile_name} was deleted. It was also deleted from some inventory records." + else: + message = f"Profile {profile_name} was deleted." + + mongo_profiles.delete_one({'_id': ObjectId(profile_id)}) + return jsonify({"message": message}), 200 + + +@profiles_blueprint.route('/profiles/update/<profile_id>', methods=['POST']) +@cross_origin() +def update_profile_record(profile_id): + profile_obj = request.json + new_profile_name = profile_obj['profileName'] + + same_name_profiles = list(mongo_profiles.find({f"{new_profile_name}": {"$exists": True}, "_id": {"$ne": ObjectId(profile_id)}})) + if len(same_name_profiles) > 0: + return jsonify( + {"message": f"Profile with name {new_profile_name} already exists. 
Profile was not edited."}), 400 + + profile_obj = profile_conversion.ui2backend(profile_obj) + + old_profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] + old_profile_name = list(old_profile.keys())[0] + + # If profile name was changed update it and also update all inventory records where this profile is used + if old_profile_name != new_profile_name: + mongo_profiles.update_one({'_id': ObjectId(profile_id)}, + {"$rename": {f"{old_profile_name}": f"{new_profile_name}"}}) + + def update_name(index, record_to_update, kwargs): + record_to_update["profiles"][index] = kwargs["new_name"] + return record_to_update + update_profiles_in_inventory(old_profile_name, update_name, new_name=new_profile_name) + + result = jsonify({"message": f"If {old_profile_name} was used in some records in the inventory," + f" it was updated to {new_profile_name}"}), 200 + else: + result = jsonify("success"), 200 + + mongo_profiles.update_one({'_id': ObjectId(profile_id)}, + {"$set": {new_profile_name: profile_obj[new_profile_name]}}) + return result \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/ui_handling/helpers.py b/backend/SC4SNMP_UI_backend/ui_handling/helpers.py deleted file mode 100644 index cd6cb91..0000000 --- a/backend/SC4SNMP_UI_backend/ui_handling/helpers.py +++ /dev/null @@ -1,97 +0,0 @@ -from SC4SNMP_UI_backend import mongo_client -from enum import Enum -from typing import Callable -from bson import ObjectId -from flask import jsonify -from SC4SNMP_UI_backend.common.conversions import InventoryConversion - -mongo_groups = mongo_client.sc4snmp.groups_ui -mongo_inventory = mongo_client.sc4snmp.inventory_ui -inventory_conversion = InventoryConversion() - -class InventoryAddEdit(Enum): - ADD = 1 - EDIT = 2 - - -def check_if_inventory_can_be_added(inventory_obj, change_type: InventoryAddEdit, inventory_id): - """ - Before updating or adding new inventory check if it can be done. For example users shouldn't add new - inventory if the same inventory already exists. - - :param inventory_obj: new inventory object to be added/updated - :param change_type: InventoryAddEdit.EDIT or InventoryAddEdit.ADD - :param inventory_id: id of the inventory to be edited - :return: - """ - - address = inventory_obj['address'] - port = inventory_obj['port'] - message = "added" if change_type == InventoryAddEdit.ADD else "edited" - inventory_id = ObjectId(inventory_id) if change_type == InventoryAddEdit.EDIT else None - - check_duplicates = False - if address[0].isdigit(): - # record is a single host - existing_inventory_record = list(mongo_inventory.find({'address': address, 'port': port, "delete": False})) - - # check if there is any record for this device which has been assigned to be deleted - deleted_inventory_record = list(mongo_inventory.find({'address': address, 'port': port, "delete": True})) - identifier = f"{address}:{port}" - check_duplicates = True - else: - # record is a group - existing_inventory_record = list(mongo_inventory.find({'address': address, "delete": False})) - - # check if there is any record for this group which has been assigned to be deleted - deleted_inventory_record = list(mongo_inventory.find({'address': address, "delete": True})) - identifier = address - group = list(mongo_groups.find({address: {"$exists": 1}})) - if len(group) == 0: - result = jsonify({"message": f"There is no group {address} configured. 
Record was not {message}."}), 400 - else: - check_duplicates = True - - if check_duplicates: - # check if the same record already exist in the inventory - if len(existing_inventory_record) == 0: - make_change = True - elif existing_inventory_record[0]["_id"] == inventory_id and change_type == InventoryAddEdit.EDIT: - make_change = True - else: - make_change = False - - if make_change: - if change_type == InventoryAddEdit.ADD: - mongo_inventory.insert_one(inventory_obj) - else: - mongo_inventory.update_one({"_id": inventory_id}, {"$set": inventory_obj}) - - if len(deleted_inventory_record) > 0: - mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) - result = jsonify("success"), 200 - else: - result = jsonify( - {"message": f"Inventory record for {identifier} already exists. Record was not {message}."}), 400 - - return result - - -def update_profiles_in_inventory(profile_to_search: str, process_record: Callable, **kwargs): - """ - When profile is edited, then in some cases inventory records using this profile should be updated. - - :param profile_to_search: name of the profile which should be updated in the inventory - :param process_record: function to process profiles in record. It should accept index of profile to update, - whole record dictionary and kwargs passed by user. - :param kwargs: additional variables which user can pass to process_record function - :return: - """ - inventory_records = list(mongo_inventory.find({"profiles": {"$regex": f'.*{profile_to_search}.*'}})) - for record in inventory_records: - record_id = record["_id"] - record_updated = inventory_conversion.backend2ui(record) - index_to_update = record_updated["profiles"].index(profile_to_search) - record_updated = process_record(index_to_update, record_updated, kwargs) - record_updated = inventory_conversion.ui2backend(record_updated, delete=False) - mongo_inventory.update_one({"_id": ObjectId(record_id)}, {"$set": record_updated}) \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/ui_handling/routes.py b/backend/SC4SNMP_UI_backend/ui_handling/routes.py deleted file mode 100644 index de23194..0000000 --- a/backend/SC4SNMP_UI_backend/ui_handling/routes.py +++ /dev/null @@ -1,309 +0,0 @@ -from bson import ObjectId -from flask import request, Blueprint, jsonify -from flask_cors import cross_origin -from SC4SNMP_UI_backend import mongo_client -from SC4SNMP_UI_backend.common.conversions import ProfileConversion, GroupConversion, GroupDeviceConversion, \ - InventoryConversion, get_group_name_from_backend -from copy import copy -from SC4SNMP_UI_backend.ui_handling.helpers import update_profiles_in_inventory, check_if_inventory_can_be_added, \ - InventoryAddEdit - -ui = Blueprint('ui', __name__) - -profile_conversion = ProfileConversion() -group_conversion = GroupConversion() -group_device_conversion = GroupDeviceConversion() -inventory_conversion = InventoryConversion() -mongo_profiles = mongo_client.sc4snmp.profiles_ui -mongo_groups = mongo_client.sc4snmp.groups_ui -mongo_inventory = mongo_client.sc4snmp.inventory_ui - - -# @cross_origin(origins='*', headers=['access-control-allow-origin', 'Content-Type']) -@ui.route('/profiles/names') -@cross_origin() -def get_profile_names(): - profiles = list(mongo_profiles.find()) - profiles_list = [] - for pr in profiles: - converted = profile_conversion.backend2ui(pr) - if converted['conditions']['condition'] not in ['mandatory', 'base']: - profiles_list.append(converted) - return jsonify([el["profileName"] for el in profiles_list]) - 
-@ui.route('/profiles/count') -@cross_origin() -def get_profiles_count(): - total_count = mongo_profiles.count_documents({}) - return jsonify(total_count) - -@ui.route('/profiles/<page_num>/<prof_per_page>') -@cross_origin() -def get_profiles_list(page_num, prof_per_page): - page_num = int(page_num) - prof_per_page = int(prof_per_page) - skips = prof_per_page * (page_num - 1) - - profiles = list(mongo_profiles.find().skip(skips).limit(prof_per_page)) - profiles_list = [] - for pr in profiles: - converted = profile_conversion.backend2ui(pr) - if converted['conditions']['condition'] not in ['mandatory']: - profiles_list.append(converted) - return jsonify(profiles_list) - - -@ui.route('/profiles') -@cross_origin() -def get_all_profiles_list(): - profiles = list(mongo_profiles.find()) - profiles_list = [] - for pr in profiles: - converted = profile_conversion.backend2ui(pr) - if converted['conditions']['condition'] not in ['mandatory']: - profiles_list.append(converted) - return jsonify(profiles_list) - - -@ui.route('/profiles/add', methods=['POST']) -@cross_origin() -def add_profile_record(): - profile_obj = request.json - same_name_profiles = list(mongo_profiles.find({f"{profile_obj['profileName']}": {"$exists": True}})) - if len(same_name_profiles) > 0: - result = jsonify( - {"message": f"Profile with name {profile_obj['profileName']} already exists. Profile was not added."}), 400 - else: - profile_obj = profile_conversion.ui2backend(profile_obj) - mongo_profiles.insert_one(profile_obj) - result = jsonify("success") - return result - -@ui.route('/profiles/delete/<profile_id>', methods=['POST']) -@cross_origin() -def delete_profile_record(profile_id): - profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] - profile_name = list(profile.keys())[0] - - # Find records from inventory where this profile was used. - def delete_profile(index, record_to_update, kwargs): - record_to_update["profiles"].pop(index) - return record_to_update - update_profiles_in_inventory(profile_name, delete_profile) - - mongo_profiles.delete_one({'_id': ObjectId(profile_id)}) - return jsonify({"message": f"If {profile_name} was used in some records in the inventory," - f" those records were updated"}), 200 - - -@ui.route('/profiles/update/<profile_id>', methods=['POST']) -@cross_origin() -def update_profile_record(profile_id): - profile_obj = request.json - new_profile_name = profile_obj['profileName'] - - same_name_profiles = list(mongo_profiles.find({f"{new_profile_name}": {"$exists": True}, "_id": {"$ne": ObjectId(profile_id)}})) - if len(same_name_profiles) > 0: - return jsonify( - {"message": f"Profile with name {new_profile_name} already exists. 
Profile was not edited."}), 400 - - profile_obj = profile_conversion.ui2backend(profile_obj) - - old_profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] - old_profile_name = list(old_profile.keys())[0] - - # If profile name was changed update it and also update all inventory records where this profile is used - if old_profile_name != new_profile_name: - mongo_profiles.update_one({'_id': ObjectId(profile_id)}, - {"$rename": {f"{old_profile_name}": f"{new_profile_name}"}}) - - def update_name(index, record_to_update, kwargs): - record_to_update["profiles"][index] = kwargs["new_name"] - return record_to_update - update_profiles_in_inventory(old_profile_name, update_name, new_name=new_profile_name) - - result = jsonify({"message": f"If {old_profile_name} was used in some records in the inventory," - f" it was updated to {new_profile_name}"}), 200 - else: - result = jsonify("success"), 200 - - mongo_profiles.update_one({'_id': ObjectId(profile_id)}, - {"$set": {new_profile_name: profile_obj[new_profile_name]}}) - return result - - -@ui.route('/groups') -@cross_origin() -def get_groups_list(): - groups = mongo_groups.find() - groups_list = [] - for gr in list(groups): - groups_list.append(group_conversion.backend2ui(gr)) - return jsonify(groups_list) - - -@ui.route('/groups/add', methods=['POST']) -@cross_origin() -def add_group_record(): - group_obj = request.json - same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) - if len(same_name_groups) > 0: - result = jsonify( - {"message": f"Group with name {group_obj['groupName']} already exists. Group was not added."}), 400 - else: - group_obj = group_conversion.ui2backend(group_obj) - mongo_groups.insert_one(group_obj) - result = jsonify("success") - return result - - -@ui.route('/groups/update/', methods=['POST']) -@cross_origin() -def update_group(group_id): - group_obj = request.json - same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) - if len(same_name_groups) > 0: - result = jsonify( - {"message": f"Group with name {group_obj['groupName']} already exists. 
Group was not edited."}), 400 - else: - old_group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] - old_group_name = get_group_name_from_backend(old_group) - mongo_groups.update_one({'_id': old_group['_id']}, {"$rename": {f"{old_group_name}": f"{group_obj['groupName']}"}}) - - # Rename corresponding group in the inventory - mongo_inventory.update_one({"address": old_group_name}, {"$set": {"address": group_obj['groupName']}}) - result = jsonify({"message": f"{old_group_name} was also renamed to {group_obj['groupName']} in the inventory"}), 200 - return result - - -@ui.route('/groups/delete/', methods=['POST']) -@cross_origin() -def delete_group_and_devices(group_id): - group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] - group_name = get_group_name_from_backend(group) - with mongo_client.start_session() as session: - with session.start_transaction(): - mongo_groups.delete_one({'_id': ObjectId(group_id)}) - mongo_inventory.update_one({"address": group_name}, {"$set": {"delete": True}}) - return jsonify({"message": f"If {group_name} was configured in the inventory, it was deleted from there"}), 200 - - -@ui.route('/group//devices/count') -@cross_origin() -def get_devices_count_for_group(group_id): - group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] - group_name = get_group_name_from_backend(group) - total_count = len(group[group_name]) - return jsonify(total_count) - - -@ui.route('/group//devices//') -@cross_origin() -def get_devices_of_group(group_id, page_num, dev_per_page): - page_num = int(page_num) - dev_per_page = int(dev_per_page) - skips = dev_per_page * (page_num - 1) - group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] - - group_name = get_group_name_from_backend(group) - devices_list = [] - for i, device in enumerate(group[group_name]): - devices_list.append(group_device_conversion.backend2ui(device, group_id=group_id, device_id=copy(i))) - devices_list = devices_list[skips:skips+dev_per_page] - return jsonify(devices_list) - - -@ui.route('/devices/add', methods=['POST']) -@cross_origin() -def add_device_to_group(): - device_obj = request.json - group_id = device_obj["groupId"] - group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] - device_obj = group_device_conversion.ui2backend(device_obj) - - new_device_port = device_obj.get('port', -1) - group_name = get_group_name_from_backend(group) - for device in group[group_name]: - old_device_port = device.get('port', -1) - if device["address"] == device_obj["address"] and old_device_port == new_device_port: - return jsonify( - {"message": f"Device {device_obj['address']}:{device_obj.get('port', '')} already exists. 
" - f"Record was not added"}), 400 - - group[group_name].append(device_obj) - new_values = {"$set": group} - mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) - return jsonify("success") - - -@ui.route('/devices/update/', methods=['POST']) -@cross_origin() -def update_device_from_group(device_id): - device_obj = request.json - group_id = device_id.split("-")[0] - device_id = device_id.split("-")[1] - group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] - device_obj = group_device_conversion.ui2backend(device_obj) - - group_name = get_group_name_from_backend(group) - group[group_name][int(device_id)] = device_obj - new_values = {"$set": group} - mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) - return jsonify("success") - - -@ui.route('/devices/delete/', methods=['POST']) -@cross_origin() -def delete_device_from_group_record(device_id: str): - group_id = device_id.split("-")[0] - device_id = device_id.split("-")[1] - group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] - group_name = get_group_name_from_backend(group) - group[group_name].pop(int(device_id)) - new_values = {"$set": group} - mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) - return jsonify("success") - - -@ui.route('/inventory//') -@cross_origin() -def get_inventory_list(page_num, dev_per_page): - page_num = int(page_num) - dev_per_page = int(dev_per_page) - skips = dev_per_page * (page_num - 1) - - inventory = list(mongo_inventory.find({"delete": False}).skip(skips).limit(dev_per_page)) - inventory_list = [] - for inv in inventory: - inventory_list.append(inventory_conversion.backend2ui(inv)) - return jsonify(inventory_list) - - -@ui.route('/inventory/count') -@cross_origin() -def get_inventory_count(): - total_count = mongo_inventory.count_documents({"delete": False}) - return jsonify(total_count) - - -@ui.route('/inventory/add', methods=['POST']) -@cross_origin() -def add_inventory_record(): - inventory_obj = request.json - inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) - return check_if_inventory_can_be_added(inventory_obj, InventoryAddEdit.ADD, None) - - -@ui.route('/inventory/delete/', methods=['POST']) -@cross_origin() -def delete_inventory_record(inventory_id): - mongo_inventory.update_one({"_id": ObjectId(inventory_id)}, {"$set": {"delete": True}}) - return jsonify("success") - - -@ui.route('/inventory/update/', methods=['POST']) -@cross_origin() -def update_inventory_record(inventory_id): - inventory_obj = request.json - inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) - return check_if_inventory_can_be_added(inventory_obj, InventoryAddEdit.EDIT, inventory_id) diff --git a/backend/app.py b/backend/app.py index a072c18..759eedd 100644 --- a/backend/app.py +++ b/backend/app.py @@ -1,6 +1,7 @@ from SC4SNMP_UI_backend import create_app -app = create_app() +flask_app = create_app() +celery_app = flask_app.extensions["celery"] if __name__ == '__main__': - app.run() + flask_app.run() diff --git a/backend/celery_start.sh b/backend/celery_start.sh new file mode 100644 index 0000000..0628580 --- /dev/null +++ b/backend/celery_start.sh @@ -0,0 +1,5 @@ +set -o errexit +set -o nounset + +cd /app +celery -A app worker -Q apply_changes --loglevel INFO \ No newline at end of file diff --git a/backend/flask_start.sh b/backend/flask_start.sh new file mode 100644 index 0000000..38d5ce4 --- /dev/null +++ b/backend/flask_start.sh @@ -0,0 +1,4 @@ +set -o errexit +set -o nounset +cd /app 
+gunicorn -b :5000 app:flask_app --log-level INFO \ No newline at end of file diff --git a/backend/package-lock.json b/backend/package-lock.json deleted file mode 100644 index d8fb6a1..0000000 --- a/backend/package-lock.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "name": "flask_inventory", - "lockfileVersion": 2, - "requires": true, - "packages": { - "": { - "dependencies": { - "axios": "^0.27.2" - } - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "node_modules/axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", - "dependencies": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" - } - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/follow-redirects": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", - "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - } - }, - "dependencies": { - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": 
"sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", - "requires": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" - } - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" - }, - "follow-redirects": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", - "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==" - }, - "form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - } - }, - "mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" - }, - "mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "requires": { - "mime-db": "1.52.0" - } - } - } -} diff --git a/backend/package.json b/backend/package.json deleted file mode 100644 index 9cf5ca6..0000000 --- a/backend/package.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "dependencies": { - "axios": "^0.27.2" - } -} diff --git a/backend/requirements.txt b/backend/requirements.txt index f676171..620a31d 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -7,5 +7,11 @@ MarkupSafe==2.1.1 pymongo==4.1.1 six==1.16.0 Werkzeug==2.2.3 -pytest +pytest~=7.2.0 gunicorn +kubernetes~=26.1.0 +python-dotenv~=0.21.0 +PyYAML~=6.0 +celery==5.2.7 +redis==4.5.5 +ruamel.yaml===0.17.32 \ No newline at end of file diff --git a/backend/tests/common/__init__.py b/backend/tests/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/common/test_conversions.py b/backend/tests/common/test_backend_ui_conversions.py similarity index 66% rename from backend/tests/common/test_conversions.py rename to backend/tests/common/test_backend_ui_conversions.py index ac19844..b1ef9d6 100644 --- a/backend/tests/common/test_conversions.py +++ b/backend/tests/common/test_backend_ui_conversions.py @@ -1,5 +1,5 @@ from unittest import TestCase -from SC4SNMP_UI_backend.common.conversions import ProfileConversion, GroupConversion, GroupDeviceConversion, \ +from SC4SNMP_UI_backend.common.backend_ui_conversions import ProfileConversion, GroupConversion, GroupDeviceConversion, \ InventoryConversion from bson import ObjectId @@ -13,6 +13,7 @@ class TestConversions(TestCase): @classmethod def setUpClass(cls): + cls.maxDiff = None common_id = "635916b2c8cb7a15f28af40a" cls.ui_prof_1 = { @@ -20,13 +21,16 @@ def setUpClass(cls): "profileName": "profile_1", "frequency": 10, "conditions": { - 
"condition": "None", + "condition": "standard", "field": "", - "patterns": None + "patterns": [], + "conditions": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1.test.2"}, + {"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": True } cls.ui_prof_2 = { @@ -36,11 +40,13 @@ def setUpClass(cls): "conditions": { "condition": "base", "field": "", - "patterns": None + "patterns": [], + "conditions": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": False } cls.ui_prof_3 = { @@ -48,20 +54,46 @@ def setUpClass(cls): "profileName": "profile_3", "frequency": 30, "conditions": { - "condition": "field", + "condition": "smart", "field": "SNMPv2-MIB.sysObjectID", - "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}], + "conditions": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": True + } + + cls.ui_prof_4 = { + "_id": common_id, + "profileName": "profile_4", + "frequency": 30, + "conditions": { + "condition": "conditional", + "field": "", + "patterns": [], + "conditions": [ + {"field": "field: IF-MIB.ifAdminStatus", "operation": "in", "value":["0", "down"], 'negateOperation': False,}, + {"field": "field: IF-MIB.ifOperStatus", "operation": "equals", "value": ["up"], 'negateOperation': True,}, + {"field": "field: IF-MIB.ifIndex", "operation": "less than", "value": ["3"], 'negateOperation': False,}, + {"field": "field: IF-MIB.ifIndex", "operation": "greater than", "value": ["5"], 'negateOperation': True,} + ] + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": False } cls.backend_prof_1 = { "_id": ObjectId(common_id), "profile_1": { "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + "varBinds": [["IF-MIB", "ifInDiscards", "1", "test", "2"], + ["IF-MIB", "ifInDiscards", "1"], + ["IF-MIB"], + ["IF-MIB", "ifOutErrors"]] } } @@ -70,7 +102,7 @@ def setUpClass(cls): "profile_2": { "frequency": 20, "condition": {"type": "base"}, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] } } @@ -81,7 +113,21 @@ def setUpClass(cls): "condition": {"type": "field", "field": 
"SNMPv2-MIB.sysObjectID", "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + cls.backend_prof_4 = { + "_id": ObjectId(common_id), + "profile_4": { + "frequency": 30, + "conditions": [ + {"field": "field: IF-MIB.ifAdminStatus", "operation": "in", "value": [0, "down"]}, + {"field": "field: IF-MIB.ifOperStatus", "operation": "equals", "value": "up", 'negate_operation': True}, + {"field": "field: IF-MIB.ifIndex", "operation": "lt", "value": 3}, + {"field": "field: IF-MIB.ifIndex", "operation": "gt", "value": 5, 'negate_operation': True} + ], + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] } } @@ -97,7 +143,8 @@ def setUpClass(cls): cls.ui_group = { "_id": common_id, - "groupName": "group_1" + "groupName": "group_1", + "groupInInventory": False } cls.ui_group_device_1 = { @@ -160,6 +207,7 @@ def setUpClass(cls): cls.ui_inventory_1 = { "_id": common_id, + "inventoryType": "Host", "address": "11.0.78.114", "port": "161", "version": "3", @@ -187,6 +235,7 @@ def setUpClass(cls): cls.ui_inventory_2 = { "_id": common_id, + "inventoryType": "Group", "address": "group_1", "port": "1161", "version": "2c", @@ -199,9 +248,10 @@ def setUpClass(cls): } def test_profile_backend_to_ui(self): - self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_1), self.ui_prof_1) - self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_2), self.ui_prof_2) - self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_3), self.ui_prof_3) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_1, profile_in_inventory=True), self.ui_prof_1) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_2, profile_in_inventory=False), self.ui_prof_2) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_3, profile_in_inventory=True), self.ui_prof_3) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_4, profile_in_inventory=False), self.ui_prof_4) def test_profile_ui_to_backend(self): back_pr1 = self.backend_prof_1 @@ -212,12 +262,17 @@ def test_profile_ui_to_backend(self): back_pr3 = self.backend_prof_3 del back_pr3["_id"] + + back_pr4 = self.backend_prof_4 + del back_pr4["_id"] + self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_1), back_pr1) self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_2), back_pr2) self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_3), back_pr3) + self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_4), back_pr4) def test_group_backend_to_ui(self): - self.assertDictEqual(group_conversion.backend2ui(self.backend_group), self.ui_group) + self.assertDictEqual(group_conversion.backend2ui(self.backend_group, group_in_inventory=False), self.ui_group) def test_group_ui_to_backend(self): new_group_from_ui = { @@ -267,8 +322,8 @@ def test_group_device_ui_to_backend(self): self.assertDictEqual(group_device_conversion.ui2backend(self.ui_group_device_4), device) def test_inventory_backend_to_ui(self): - self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_1), self.ui_inventory_1) - self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_2), self.ui_inventory_2) + self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_1, inventory_type="Host"), self.ui_inventory_1) + 
self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_2, inventory_type="Group"), self.ui_inventory_2) def test_inventory_ui_to_backend(self): back_inv = self.backend_inventory_1 diff --git a/backend/tests/ui_handling/__init__.py b/backend/tests/ui_handling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/ui_handling/create_job_object.py b/backend/tests/ui_handling/create_job_object.py new file mode 100644 index 0000000..fa5d0b4 --- /dev/null +++ b/backend/tests/ui_handling/create_job_object.py @@ -0,0 +1,173 @@ +import yaml +from kubernetes import client +from SC4SNMP_UI_backend.apply_changes.kubernetes_job import create_job_object + +JOB_CONFIGURATION_YAML = yaml.safe_load(""" +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-splunk-connect-for-snmp-inventory + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-1.9.0 + app.kubernetes.io/version: "1.9.0" + app.kubernetes.io/managed-by: Helm +spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + annotations: + imageregistry: https://hub.docker.com/ + + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + spec: + imagePullSecrets: + - name: myregistrykey + containers: + - name: splunk-connect-for-snmp-inventory + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.9.0" + imagePullPolicy: Always + args: + ["inventory"] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: INVENTORY_PATH + value: /app/inventory/inventory.csv + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: LOG_LEVEL + value: INFO + - name: CONFIG_FROM_MONGO + value: "true" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: inventory + mountPath: "/app/inventory" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: inventory + configMap: + # Provide the name of the ConfigMap you want to mount. 
+ name: splunk-connect-for-snmp-inventory + # An array of keys from the ConfigMap to create as files + items: + - key: "inventory.csv" + path: "inventory.csv" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + restartPolicy: OnFailure +""") + +def test_create_job_object(): + expected_job = client.V1Job( + api_version="batch/v1", + kind="Job", + metadata=client.V1ObjectMeta( + name="release-name-splunk-connect-for-snmp-inventory", + labels={ + "app.kubernetes.io/name": "splunk-connect-for-snmp-inventory", + "app.kubernetes.io/instance": "release-name", + "helm.sh/chart": "splunk-connect-for-snmp-1.9.0", + "app.kubernetes.io/version": "1.9.0", + "app.kubernetes.io/managed-by": "Helm" + } + ), + spec=client.V1JobSpec( + ttl_seconds_after_finished=300, + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + annotations={ + "imageregistry": "https://hub.docker.com/" + }, + labels={ + "app.kubernetes.io/name": "splunk-connect-for-snmp-inventory", + "app.kubernetes.io/instance": "release-name" + } + ), + spec=client.V1PodSpec( + image_pull_secrets=[client.V1LocalObjectReference(name="myregistrykey")], + containers=[ + client.V1Container( + name="splunk-connect-for-snmp-inventory", + image="ghcr.io/splunk/splunk-connect-for-snmp/container:1.9.0", + image_pull_policy="Always", + args=["inventory"], + env=[ + client.V1EnvVar(name="CONFIG_PATH",value="/app/config/config.yaml"), + client.V1EnvVar(name="REDIS_URL", value="redis://release-name-redis-headless:6379/1"), + client.V1EnvVar(name="INVENTORY_PATH", value="/app/inventory/inventory.csv"), + client.V1EnvVar(name="CELERY_BROKER_URL", value="redis://release-name-redis-headless:6379/0"), + client.V1EnvVar(name="MONGO_URI", value="mongodb://release-name-mongodb:27017"), + client.V1EnvVar(name="MIB_SOURCES", value="http://release-name-mibserver/asn1/@mib@"), + client.V1EnvVar(name="MIB_INDEX", value="http://release-name-mibserver/index.csv"), + client.V1EnvVar(name="MIB_STANDARD", value="http://release-name-mibserver/standard.txt"), + client.V1EnvVar(name="LOG_LEVEL", value="INFO"), + client.V1EnvVar(name="CONFIG_FROM_MONGO", value="true") + ], + volume_mounts=[ + client.V1VolumeMount(name="config", mount_path="/app/config", read_only=True), + client.V1VolumeMount(name="inventory", mount_path="/app/inventory", read_only=True), + client.V1VolumeMount(name="pysnmp-cache-volume", mount_path="/.pysnmp/", read_only=False), + client.V1VolumeMount(name="tmp", mount_path="/tmp/", read_only=False), + ] + ) + ], + volumes=[ + client.V1Volume(name="config", + config_map=client.V1ConfigMapVolumeSource( + name="splunk-connect-for-snmp-config", + items=[ + client.V1KeyToPath(key="config.yaml",path="config.yaml") + ] + )), + client.V1Volume(name="inventory", + config_map=client.V1ConfigMapVolumeSource( + name="splunk-connect-for-snmp-inventory", + items=[ + client.V1KeyToPath(key="inventory.csv", path="inventory.csv") + ] + )), + client.V1Volume(name="pysnmp-cache-volume", empty_dir=client.V1EmptyDirVolumeSource()), + client.V1Volume(name="tmp", empty_dir=client.V1EmptyDirVolumeSource()) + ], + restart_policy="OnFailure" + ) + ) + ) + ) + + assert create_job_object(JOB_CONFIGURATION_YAML) == expected_job \ No newline at end of file diff --git a/backend/tests/ui_handling/get_endpoints/__init__.py b/backend/tests/ui_handling/get_endpoints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/ui_handling/test_get_endpoints.py b/backend/tests/ui_handling/get_endpoints/test_get_endpoints.py similarity 
index 88% rename from backend/tests/ui_handling/test_get_endpoints.py rename to backend/tests/ui_handling/get_endpoints/test_get_endpoints.py index e293a6b..9f9848e 100644 --- a/backend/tests/ui_handling/test_get_endpoints.py +++ b/backend/tests/ui_handling/get_endpoints/test_get_endpoints.py @@ -1,7 +1,6 @@ from unittest import mock from bson import ObjectId - @mock.patch("pymongo.collection.Collection.find") def test_get_profile_names(m_client, client): m_client.return_value = [{ @@ -53,13 +52,15 @@ def test_get_all_profiles_list(m_client, client): "profileName": "profile_1", "frequency": 10, "conditions": { - "condition": "None", + "condition": "standard", + "conditions": [], "field": "", - "patterns": None + "patterns": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + 'profileInInventory': True, } ui_prof_2 = { @@ -67,13 +68,15 @@ def test_get_all_profiles_list(m_client, client): "profileName": "profile_2", "frequency": 30, "conditions": { - "condition": "field", + "condition": "smart", + "conditions": [], "field": "SNMPv2-MIB.sysObjectID", "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + 'profileInInventory': True, } response = client.get('/profiles') @@ -84,8 +87,9 @@ def test_get_all_profiles_list(m_client, client): @mock.patch("pymongo.collection.Collection.find") def test_get_groups_list(m_client, client): common_id = "635916b2c8cb7a15f28af40a" - m_client.return_value = [ - { + + m_client.side_effect = [ + [{ "_id": common_id, "group_1": [ {"address": "1.2.3.4"} @@ -96,17 +100,21 @@ def test_get_groups_list(m_client, client): "group_2": [ {"address": "1.2.3.4"} ] - } + }], + [], + [{"address": "group_2"}] ] expected_groups = [ { "_id": common_id, - "groupName": "group_1" + "groupName": "group_1", + "groupInInventory": False }, { "_id": common_id, - "groupName": "group_2" + "groupName": "group_2", + "groupInInventory": True } ] @@ -255,8 +263,9 @@ def test_get_devices_of_group(m_client, client): assert response.json == third_result +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") @mock.patch("pymongo.cursor.Cursor.limit") -def test_get_inventory_list(m_cursor, client): +def test_get_inventory_list(m_cursor, m_get_inventory_type, client): common_id = "635916b2c8cb7a15f28af40a" m_cursor.side_effect = [ @@ -305,9 +314,12 @@ def test_get_inventory_list(m_cursor, client): ] ] + m_get_inventory_type.side_effect = ["Host", "Group", "Group"] + first_result = [ { "_id": common_id, + "inventoryType": "Host", "address": "11.0.78.114", "port": "161", "version": "3", @@ -320,6 +332,7 @@ def test_get_inventory_list(m_cursor, client): }, { "_id": common_id, + "inventoryType": "Group", "address": "group_1", "port": "1161", "version": "2c", @@ -335,6 +348,7 @@ def test_get_inventory_list(m_cursor, client): 
second_result = [ { "_id": common_id, + "inventoryType": "Group", "address": "group_2", "port": "161", "version": "3", diff --git a/backend/tests/ui_handling/post_endpoints/__init__.py b/backend/tests/ui_handling/post_endpoints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py b/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py new file mode 100644 index 0000000..58cebec --- /dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py @@ -0,0 +1,299 @@ +from unittest import mock +from unittest.mock import call +from bson import ObjectId +from copy import copy +import ruamel +import datetime +import os +from SC4SNMP_UI_backend.apply_changes.handling_chain import TMP_FILE_PREFIX + +VALUES_TEST_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../yamls_for_tests/values_test") +REFERENCE_FILES_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../yamls_for_tests/reference_files") + +def return_generated_and_reference_files(): + reference_files_names = ["poller_inventory.yaml", "scheduler_profiles.yaml", "scheduler_groups.yaml"] + reference_files = [] + generated_files = [] + yaml = ruamel.yaml.YAML() + + for file_name in reference_files_names: + # add temporary files + reference_file_path = os.path.join(REFERENCE_FILES_DIRECTORY, file_name) + with open(reference_file_path, "r") as file: + data = yaml.load(file) + reference_files.append(copy(data)) + + generated_file_path = os.path.join(VALUES_TEST_DIRECTORY, f"{TMP_FILE_PREFIX}{file_name}") + with open(generated_file_path, "r") as file: + data = yaml.load(file) + generated_files.append(copy(data)) + + # add values files + edited_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values.yaml") + original_values_path = os.path.join(REFERENCE_FILES_DIRECTORY, "values.yaml") + with open(original_values_path, "r") as file: + data = yaml.load(file) + reference_files.append(copy(data)) + with open(edited_values_path, "r") as file: + data = yaml.load(file) + generated_files.append(copy(data)) + return reference_files, generated_files + +def delete_generated_files(): + reference_files_names = ["poller_inventory.yaml", "scheduler_profiles.yaml", "scheduler_groups.yaml"] + for file_name in reference_files_names: + generated_file_path = os.path.join(VALUES_TEST_DIRECTORY, f"{TMP_FILE_PREFIX}{file_name}") + if os.path.exists(generated_file_path): + os.remove(generated_file_path) + +def reset_generated_values(): + edited_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values.yaml") + original_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values-before-edit.yaml") + yaml = ruamel.yaml.YAML() + with open(original_values_path, "r") as file: + original_data = yaml.load(file) + with open(edited_values_path, "w") as file: + yaml.dump(original_data, file) + + +common_id = "635916b2c8cb7a15f28af40a" + +groups_collection = [ + { + "_id": ObjectId(common_id), + "group1": [ + {"address": "52.14.243.157", "port": 1163}, + {"address": "20.14.10.0", "port": 161}, + ], + }, + { + "_id": ObjectId(common_id), + "group2": [ + {"address": "0.10.20.30"}, + {"address": "52.14.243.157", "port": 1165, "version": "3", "secret": "mysecret", "security_engine": "aabbccdd1234"}, + ] + } +] + +profiles_collection = [ + { + "_id": ObjectId(common_id), + "single_metric":{ + "frequency": 60, + "varBinds":[['IF-MIB', 'ifMtu', '1']] + } + }, + { + "_id": ObjectId(common_id), + "small_walk":{ + "condition":{ + 
"type": "walk" + }, + "varBinds":[['IP-MIB'],['IF-MIB']] + } + }, + { + "_id": ObjectId(common_id), + "gt_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "gt", "value": 1} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "lt_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "lt", "value": 2} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "in_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifDescr", "operation": "in", "value": ["eth0", "test value"]} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "multiple_conditions":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "gt", "value": 1}, + {"field": "IF-MIB.ifDescr", "operation": "in", "value": ["eth0", "test value"]} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards'],['IF-MIB', 'ifOutErrors'],['IF-MIB', 'ifOutOctets']] + } + } +] + +inventory_collection = [ + { + "_id": ObjectId(common_id), + "address": "1.1.1.1", + "port": 161, + "version": "2c", + "community": "public", + "secret": "", + "security_engine": "", + "walk_interval": 1800, + "profiles": "small_walk;in_profile", + "smart_profiles": True, + "delete": False + }, + { + "_id": ObjectId(common_id), + "address": "group1", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "security_engine": "", + "walk_interval": 1800, + "profiles": "single_metric;multiple_conditions", + "smart_profiles": False, + "delete": False + } +] + +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_FILE", "values.yaml") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.KEEP_TEMP_FILES", "true") +@mock.patch("datetime.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_apply_changes_first_call(m_find, m_update, m_run_job, m_datetime, client): + datetime_object = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) + m_datetime.utcnow = mock.Mock(return_value=datetime_object) + collection = { + "_id": ObjectId(common_id), + "previous_job_start_time": None, + "currently_scheduled": False + } + m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler + [collection], + [collection], + [collection] + ] + calls_find = [ + call(), + call(), + call() + ] + calls_update = [ + call({"_id": ObjectId(common_id)},{"$set": {"previous_job_start_time": datetime_object}}), + call({"_id": ObjectId(common_id)},{"$set": {"currently_scheduled": True}}) + ] + apply_async_calls = [ + call(countdown=300, queue='apply_changes') + ] + + m_run_job.apply_async.return_value = None + m_update.return_value = None + + response = client.post("/apply-changes") + m_find.assert_has_calls(calls_find) + m_update.assert_has_calls(calls_update) + m_run_job.apply_async.assert_has_calls(apply_async_calls) + assert response.json == {"message": "Configuration will be updated in approximately 300 seconds."} + reference_files, generated_files = 
return_generated_and_reference_files() + for ref_f, gen_f in zip(reference_files, generated_files): + assert ref_f == gen_f + delete_generated_files() + reset_generated_values() + +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_apply_changes_job_currently_scheduled(m_find, m_update, m_run_job, m_datetime, client): + datetime_object_old = datetime.datetime(2020, 7, 10, 10, 27, 10, 0) + datetime_object_new = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) + m_datetime.datetime.utcnow = mock.Mock(return_value=datetime_object_new) + collection = { + "_id": ObjectId(common_id), + "previous_job_start_time": datetime_object_old, + "currently_scheduled": True + } + m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler + [collection], + [collection], + [collection] + ] + calls_find = [ + call(), + call(), + call() + ] + m_run_job.apply_async.return_value = None + m_update.return_value = None + + response = client.post("/apply-changes") + m_find.assert_has_calls(calls_find) + assert not m_run_job.apply_async.called + assert response.json == {"message": "Configuration will be updated in approximately 130 seconds."} + delete_generated_files() + reset_generated_values() + + +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_apply_changes_new_job_delay_1(m_find, m_update, m_run_job, m_datetime, client): + datetime_object_old = datetime.datetime(2020, 7, 10, 10, 20, 0, 0) + datetime_object_new = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) + m_datetime.datetime.utcnow = mock.Mock(return_value=datetime_object_new) + collection = { + "_id": ObjectId(common_id), + "previous_job_start_time": datetime_object_old, + "currently_scheduled": False + } + m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler + [collection], + [collection], + [collection] + ] + calls_find = [ + call(), + call(), + call() + ] + apply_async_calls = [ + call(countdown=1, queue='apply_changes') + ] + + m_run_job.apply_async.return_value = None + m_update.return_value = None + + response = client.post("/apply-changes") + m_find.assert_has_calls(calls_find) + m_run_job.apply_async.assert_has_calls(apply_async_calls) + assert response.json == {"message": "Configuration will be updated in approximately 1 seconds."} + delete_generated_files() + reset_generated_values() diff --git a/backend/tests/ui_handling/post_endpoints/test_post_groups.py b/backend/tests/ui_handling/post_endpoints/test_post_groups.py new file mode 100644 index 0000000..be59eda --- 
/dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_groups.py @@ -0,0 +1,579 @@ +from unittest import mock +from unittest.mock import call, Mock +from bson import ObjectId + +common_id = "635916b2c8cb7a15f28af40a" + +# TEST ADDING GROUP +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_record_success(m_find, m_insert, client): + ui_group = { + "groupName": "group_1" + } + + backend_group = { + "group_1": [] + } + + find_calls = [ + call({'address': 'group_1', 'delete': False}), + call({f"group_1": {"$exists": True}}), + ] + m_find.side_effect = [[],[]] + + response = client.post(f"/groups/add", json=ui_group) + m_insert.return_value = None + m_find.has_calls(find_calls) + assert m_insert.call_args == call(backend_group) + assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_record_with_already_existing_name_failure(m_find, m_insert, client): + ui_group = { + "groupName": "group_1" + } + + backend_group = { + "group_1": [] + } + + m_find.side_effect = [ + [backend_group] + ] + + response = client.post(f"/groups/add", json=ui_group) + assert not m_insert.called + assert response.json == {"message": "Group with name group_1 already exists. Group was not added."} + +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_record_with_name_existing_in_inventory_as_hostname_failure(m_find, m_insert, client): + ui_group = { + "groupName": "test" + } + + m_find.side_effect = [ + [], + [{"address": "test"}] + ] + + response = client.post(f"/groups/add", json=ui_group) + assert not m_insert.called + assert response.json == {"message": "In the inventory there is a record with name test. Group was not added."} + +# TEST UPDATING GROUP +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_success(m_find, m_update, client): + + ui_group_new = { + "_id": common_id, + "groupName": "group_1_edit" + } + + backend_group_old = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4"}, + ] + } + + calls_update = [ + call({'_id': ObjectId(common_id)}, {"$rename": {"group_1": "group_1_edit"}}), + call({"address": "group_1"}, {"$set": {"address": 'group_1_edit'}}) + ] + + m_find.side_effect = [ + [], + [], + [backend_group_old] + ] + + response = client.post(f"/groups/update/{common_id}", json=ui_group_new) + m_update.assert_has_calls(calls_update) + assert response.json == {"message": "group_1 was also renamed to group_1_edit in the inventory"} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_failure(m_find, m_update, client): + ui_group_new = { + "_id": common_id, + "groupName": "group_1_edit" + } + + backend_group_old = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4"}, + ] + } + + backend_group_existing = { + "_id": ObjectId(common_id), + "group_1_edit": [ + {"address": "1.2.3.4"}, + ] + } + + m_find.side_effect = [ + [backend_group_old], + [backend_group_existing] + ] + + response = client.post(f"/groups/update/{common_id}", json=ui_group_new) + #m_update.assert_has_calls(calls_update) + assert not m_update.called + assert response.json == {"message": "Group with name group_1_edit already exists. 
Group was not edited."} + +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_record_with_name_existing_in_inventory_as_hostname_failure(m_find, m_insert, client): + ui_group = { + "_id": common_id, + "groupName": "test" + } + + m_find.side_effect = [ + [], + [{"address": "test"}] + ] + + response = client.post(f"/groups/update/{common_id}", json=ui_group) + assert not m_insert.called + assert response.json == {"message": "In the inventory there is a record with name test. Group was not edited."} + +# TEST DELETING GROUP +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.MongoClient.start_session") +def test_delete_group_and_devices(m_session, m_update, m_delete, m_find, client): + backend_group = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4"}, + ] + } + m_session.return_value.__enter__.return_value.start_transaction.__enter__ = Mock() + + m_find.side_effect = [ + [backend_group], + [] + ] + + calls_find = [ + call({'_id': ObjectId(common_id)}), + call({"address": "group_1"}) + ] + + m_delete.return_value = None + m_update.return_value = None + + response = client.post(f"/groups/delete/{common_id}") + m_find.assert_has_calls(calls_find) + assert m_delete.call_args == call({'_id': ObjectId(common_id)}) + assert m_update.call_args == call({"address": "group_1"}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Group group_1 was deleted."} + + m_find.side_effect = [ + [backend_group], + [{}] + ] + + response = client.post(f"/groups/delete/{common_id}") + m_find.assert_has_calls(calls_find) + assert m_delete.call_args == call({'_id': ObjectId(common_id)}) + assert m_update.call_args == call({"address": "group_1"}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Group group_1 was deleted. 
It was also deleted from the inventory."} + + +# TEST ADDING DEVICE +ui_group_device_add_new_success = lambda : { + "address": "2.2.2.2", + "port": "", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + +backend_group_add_device_old = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4", "port": 161}, + ] + } + +backend_group_add_device_success_new = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4", 'port': 161}, + {"address": "2.2.2.2", "version": "3", "secret": "snmpv3"} + ] + } + +group_inventory = lambda : { + "_id": ObjectId(common_id), + "address": "group_1", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_not_configured_in_inventory_success(m_find, m_update, client): + m_find.side_effect = [ + [backend_group_add_device_old()], + [], + [backend_group_add_device_old()] + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), + call({"address": "group_1", "delete": False}), + call({'_id': ObjectId(common_id)}, {"_id": 0}) + ] + m_update.return_value = None + + response = client.post(f"/devices/add", json=ui_group_device_add_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_add_device_success_new()}) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_configured_in_inventory_success(m_find, m_update, client): + + m_find.side_effect = [ + [backend_group_add_device_old()], # call from group/routes.add_device_to_group + [group_inventory()], # call from HandleNewDevice.add_group_host + [backend_group_add_device_old()], # call from HandleNewDevice.add_group_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [group_inventory()], # call from HandleNewDevice._is_host_in_group + [backend_group_add_device_old()], # call from HandleNewDevice._is_host_in_group + [] # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.add_device_to_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.add_group_host + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from HandleNewDevice.add_group_host + call({'address': "2.2.2.2", 'port': 1161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "2.2.2.2", 'port': 1161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_1": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({"2.2.2.2": {"$exists": True}}) # call from HandleNewDevice.add_single_host + ] + m_update.return_value = None + + response = client.post(f"/devices/add", json=ui_group_device_add_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_add_device_success_new()}) + assert response.json == "success" + + 
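Worth spelling out the mocking pattern these group and device tests lean on: a single mock.patch on pymongo.collection.Collection.find serves every query the request handler makes, so side_effect must list the result of each call in the exact order the HandleNewDevice chain issues them, and assert_has_calls then pins that order down. A stripped-down sketch of the pattern — add_host here is a stand-in, not the backend's actual handler:

    from unittest import mock
    from unittest.mock import call

    def add_host(collection, address, port):
        # Toy two-step lookup standing in for the HandleNewDevice checks.
        if list(collection.find({"address": address, "port": port, "delete": False})):
            return "already configured"
        if list(collection.find({"address": address, "port": port, "delete": True})):
            return "re-added after soft delete"
        return "added"

    @mock.patch("pymongo.collection.Collection.find")
    def test_add_host(m_find):
        from pymongo.collection import Collection
        # One patched method answers both queries, consumed in call order.
        m_find.side_effect = [[], [{"address": "1.2.3.4"}]]
        result = add_host(Collection, "1.2.3.4", 161)
        m_find.assert_has_calls([
            call({"address": "1.2.3.4", "port": 161, "delete": False}),
            call({"address": "1.2.3.4", "port": 161, "delete": True}),
        ])
        assert result == "re-added after soft delete"
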
+@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_not_configured_in_inventory_failed(m_find, m_update, client): + ui_group_device_new = { + "address": "1.2.3.4", + "port": "161", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + + m_find.side_effect = [ + [backend_group_add_device_old()], # call from group/routes.add_device_to_group + [], # call from HandleNewDevice.add_group_host + [backend_group_add_device_old()] # call from HandleNewDevice.add_group_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.add_device_to_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.add_group_host + call({'_id': ObjectId(common_id)}, {"_id": 0}) # call from HandleNewDevice.add_group_host + ] + m_update.return_value = None + + response = client.post(f"/devices/add", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 1.2.3.4:161 already exists in group group_1. Record was not added.'} + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_configured_in_inventory_failed(m_find, m_update, client): + + ui_group_device_new = { + "address": "5.5.5.5", + "port": "161", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + + existing_device_inventory = { + "_id": ObjectId(common_id), + "address": "5.5.5.5", + "port": 161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + + m_find.side_effect = [ + [backend_group_add_device_old()], # call from group/routes.add_device_to_group + [group_inventory()], # call from HandleNewDevice.add_group_host + [backend_group_add_device_old()], # call from HandleNewDevice.add_group_host + [existing_device_inventory], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.add_device_to_groupp + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.add_group_host + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from HandleNewDevice.add_group_host + call({'address': "5.5.5.5", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "5.5.5.5", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"5.5.5.5": {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/devices/add", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 5.5.5.5:161 already exists in the inventory. 
Record was not added.'} + + +# TEST UPDATING DEVICES +backend_group_update_device_old = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "2.2.2.2"}, + {"address": "3.3.3.3"} + ] + } + +backend_group_update_device_success_new = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", + "security_engine": "1112233aabbccdee"}, + {"address": "3.3.3.3"} + ] + } + +ui_group_device_update_new_success = lambda : { + "address": "2.2.2.3", + "port": "1161", + "version": "2c", + "community": "public", + "secret": "", + "securityEngine": "1112233aabbccdee", + "groupId": str(common_id) + } + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_not_configured_in_inventory_success(m_find, m_update, client): + + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()] # call from HandleNewDevice.edit_group_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.update_device_from_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}) # call from HandleNewDevice.edit_group_host + ] + m_update.return_value = None + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_update_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_update_device_success_new()}) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_configured_in_inventory_success(m_find, m_update, client): + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [group_inventory()], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()], # call from HandleNewDevice.edit_group_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [group_inventory()], # call from HandleNewDevice._is_host_in_group + [backend_group_update_device_old()] # call from HandleNewDevice._is_host_in_group + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.update_device_from_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}), # call from HandleNewDevice.edit_group_host + call({'address': "2.2.2.3", 'port': 1161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "2.2.2.3", 'port': 1161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_1": {"$exists": 1}}) # call from HandleNewDevice._is_host_in_group + ] + m_update.return_value = None + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_update_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_update_device_success_new()}) 
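As the URLs in the tests below show, the device update and delete endpoints address a device with a composite path parameter: the group's ObjectId joined with a hyphen to the device's position in the group's list (e.g. {common_id}-1). A hedged sketch of splitting such a parameter; the helper name and exact parsing are assumptions, not the backend's code:

    from bson import ObjectId

    def parse_device_path(param: str) -> tuple:
        # Split "<group ObjectId hex>-<device index>"; ObjectId hex digits
        # contain no hyphen, so rpartition on "-" is unambiguous.
        group_id, _, index = param.rpartition("-")
        return ObjectId(group_id), int(index)

    group_id, device_index = parse_device_path("635916b2c8cb7a15f28af40a-1")
    assert device_index == 1  # the second device in the group's list
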
+ assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_not_configured_in_inventory_failed(m_find, m_update, client): + ui_group_device_new = { + "address": "3.3.3.3", + "port": "", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()] # call from HandleNewDevice.edit_group_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.add_device_to_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}) # call from HandleNewDevice.edit_group_host + ] + m_update.return_value = None + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 3.3.3.3: already exists in group group_1. Record was not edited.'} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_configured_in_inventory_failed(m_find, m_update, client): + ui_group_device_new = { + "address": "5.5.5.5", + "port": "161", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + + second_group_inventory = { + "_id": ObjectId(common_id), + "address": "group_2", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + + second_group_inventory_backend= { + "_id": ObjectId(common_id), + "group_2": [ + {"address": "5.5.5.5", "port": 161}, + ] + } + + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [group_inventory()], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()], # call from HandleNewDevice.edit_group_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [second_group_inventory], # call from HandleNewDevice._is_host_in_group + [second_group_inventory_backend] # call from HandleNewDevice._is_host_in_group + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.update_device_from_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}), # call from HandleNewDevice.edit_group_host + call({'address': "5.5.5.5", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "5.5.5.5", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}) # call from HandleNewDevice._is_host_in_group + ] + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 5.5.5.5:161 already exists in group group_2. 
Record was not edited.'} + + +# TEST DELETING DEVICE +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_delete_device_from_group_record(m_find, m_update, client): + + backend_group_old = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", + "security_engine": "1112233aabbccdee"}, + {"address": "3.3.3.3"} + ] + } + + backend_group_new1 = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "3.3.3.3"} + ] + } + + backend_group_new2 = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "3.3.3.3"} + ] + } + + m_find.return_value = [backend_group_old] + m_update.return_value = None + response = client.post(f"/devices/delete/{common_id}-1") + + assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new1}) + assert response.json == {'message': 'Device 2.2.2.3:1161 from group group_1 was deleted.'} + + m_find.return_value = [backend_group_new1] + response = client.post(f"/devices/delete/{common_id}-0") + assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new2}) + assert response.json == {'message': 'Device 1.1.1.1: from group group_1 was deleted.'} \ No newline at end of file diff --git a/backend/tests/ui_handling/post_endpoints/test_post_inventory.py b/backend/tests/ui_handling/post_endpoints/test_post_inventory.py new file mode 100644 index 0000000..ea28e2b --- /dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_inventory.py @@ -0,0 +1,1263 @@ +from unittest import mock +from unittest.mock import call, Mock +from bson import ObjectId + + +common_id = "635916b2c8cb7a15f28af40a" + +# TEST ADDING A SINGLE HOST +ui_inventory_new = lambda : { + "inventoryType": "Host", + "address": "11.0.78.114", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +backend_inventory_new = lambda : { + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +ui_inventory_new_host_name = lambda : { + "inventoryType": "Host", + "address": "test", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +backend_inventory_new_host_name = lambda : { + "address": "test", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_single_host_success(m_find, m_insert, m_delete, client): + + # Test adding a new device, when there was no device with the same + # address and port with deleted flag set to True. 
+ m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'address': "11.0.78.114", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "11.0.78.114", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"11.0.78.114": {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + + response = client.post(f"/inventory/add", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new()) + assert not m_delete.called + assert response.json == "success" + + # Test adding a new device when there was a device with the same + # address and port with deleted flag set to True. + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [{ + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + }], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + + response = client.post(f"/inventory/add", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new()) + assert m_delete.call_args == call({"_id": ObjectId(common_id)}) + assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_single_host_name_success(m_find, m_insert, m_delete, client): + # Test adding a new device, when there was no device with the same + # address and port with deleted flag set to True. + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'address': "test", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "test", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"test": {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=ui_inventory_new_host_name()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_host_name()) + assert not m_delete.called + assert response.json == "success" + + # Test adding a new device when there was a device with the same + # address and port with deleted flag set to True. 
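The mocks set up next make that earlier record come back with delete=True, and the assertions that follow pin down a purge-then-insert behaviour: the soft-deleted tombstone is physically removed (delete_one) before the replacement is inserted, keeping the address/port pair unique. A minimal sketch of logic consistent with those assertions — illustrative only, the real implementation lives in the backend's HandleNewDevice chain:

    def upsert_host(inventory, new_record: dict):
        # Refuse a duplicate that is still active.
        query = {"address": new_record["address"], "port": new_record["port"]}
        if list(inventory.find({**query, "delete": False})):
            return {"message": "Host already exists in the inventory. Record was not added."}
        # Purge any soft-deleted tombstone for the same address/port...
        for old in inventory.find({**query, "delete": True}):
            inventory.delete_one({"_id": old["_id"]})
        # ...then insert the replacement as an active record.
        inventory.insert_one({**new_record, "delete": False})
        return "success"
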
+ m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [{ + "_id": ObjectId(common_id), + "address": "test", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + }], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=ui_inventory_new_host_name()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_host_name()) + assert m_delete.call_args == call({"_id": ObjectId(common_id)}) + assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_single_host_failure(m_find, m_insert, m_delete, client): + + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [{ + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + }], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'address': "11.0.78.114", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "11.0.78.114", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({'11.0.78.114': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert not m_delete.called + assert not m_insert.called + assert response.json == {"message": "Host 11.0.78.114:161 already exists in the inventory. " + "Record was not added."} + + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [{"test":[]}], # call from HandleNewDevice.add_single_host + ] + response = client.post(f"/inventory/add", json=ui_inventory_new_host_name()) + assert not m_delete.called + assert not m_insert.called + assert response.json == {"message": "There is a group with the same name configured. 
Record test can't be added as a single host."} + + +# TEST UPDATING A SINGLE HOST +backend_inventory_old = lambda : { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 2000, + "security_engine": "1234aabbccd", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_single_host_success(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + # Test editing a device without changing its address and port + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [backend_inventory_old()], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "11.0.78.114", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "11.0.78.114", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.edit_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new()}) + assert not m_insert.called + assert not m_delete.called + assert response.json == "success" + + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_single_host_address_and_port_success(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + # Test editing a device with changing its address and port + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + ui_inventory_new_address_port = { + "inventoryType": "Host", + "address": "1.0.0.0", + "port": "1111", + "version": "3", + "community": "", + "secret": "my_secret_new", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + backend_inventory_new_address_port = { + "address": "1.0.0.0", + "port": 1111, + "version": "3", + "community": "", + "secret": "my_secret_new", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + deleted_host_backend = { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": 
False, + "delete": True + } + + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "1.0.0.0", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.0.0.0", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"_id": ObjectId(common_id)}), + call({'address': "1.0.0.0", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.0.0.0", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({'1.0.0.0': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new_address_port) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_address_port) + assert m_delete.call_args == call({"_id": ObjectId("43EE0BCBA668527E7106E4F5")}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Address or port was edited which resulted in deleting the old device and creating " \ + "the new one at the end of the list."} + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_ip_to_hostname_success(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + ui_inventory_new_address_port = { + "inventoryType": "Host", + "address": "test", + "port": "1111", + "version": "3", + "community": "", + "secret": "my_secret_new", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + backend_inventory_new_address_port = { + "address": "test", + "port": 1111, + "version": "3", + "community": "", + "secret": "my_secret_new", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + deleted_host_backend = { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + } + + m_find.side_effect = 
[ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "test", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "test", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"_id": ObjectId(common_id)}), + call({'address': "test", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "test", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({'test': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new_address_port) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_address_port) + assert m_delete.call_args == call({"_id": ObjectId("43EE0BCBA668527E7106E4F5")}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Address or port was edited which resulted in deleting the old device and creating " \ + "the new one at the end of the list."} + + +backend_inventory_old = lambda : { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 2000, + "security_engine": "1234aabbccd", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_single_host_failed(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + existing_id = "035916b2c8cb7a15f28af40b" + + ui_inventory_new = { + "inventoryType": "Host", + "address": "0.0.0.0", + "port": "1161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [{ + "_id": ObjectId(existing_id), + "address": "0.0.0.0", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + 
}], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "0.0.0.0", 'port': 1161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "0.0.0.0", 'port': 1161, "delete": True}), # call from HandleNewDevice._is_host_configured + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) + m_find.assert_has_calls(calls_find) + assert response.json == {"message": "Host 0.0.0.0:1161 already exists in the inventory. " + "Record was not edited."} + assert response.status_code == 400 + assert not m_insert.called + assert not m_update.called + assert not m_delete.called + + ui_inventory_new = { + "inventoryType": "Host", + "address": "test", + "port": "1161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [{"test":[]}], # call from HandleNewDevice.add_single_host + ] + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) + assert response.json == {"message": "There is a group with the same name configured. 
Record test can't be added as a single host."} + assert response.status_code == 400 + assert not m_insert.called + assert not m_update.called + assert not m_delete.called + + + + + +# TEST ADDING A GROUP +new_group_ui_inventory = lambda : { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +new_group_backend_inventory = lambda :{ + "address": "group_1", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +new_group_backend = lambda : { + "_id": ObjectId(common_id), + "group_1": [{"address": "1.2.3.4"}] +} + +existing_group_backend = lambda : { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "group_2": [{"address": "0.0.0.0"}] +} + +existing_group_inventory_backend = lambda : { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "address": "group_2", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_success(m_find, m_insert, m_delete, client): + + m_insert.return_value = None + m_delete.return_value = None + + # Test adding a new group, when there was no group with the same name with deleted flag set to True + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "1.2.3.4", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.2.3.4", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'1.2.3.4': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(new_group_backend_inventory()) + assert not m_delete.called + assert response.json == "success" + + # Test adding a new group, when there was a group with the same name with deleted flag set to True + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [{ + "_id": 
ObjectId("83EE0BCBA668527E7106E4F5"), + "address": "group_3", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + }], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(new_group_backend_inventory()) + assert m_delete.call_args == call({"_id": ObjectId("83EE0BCBA668527E7106E4F5")}) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_which_exists_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [new_group_backend_inventory()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend()], # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group group_1 has already been added to the inventory. 
" + "Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_with_hosts_configured_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + new_group_ui_failure = { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_group_backend_failure = { + "_id": ObjectId(common_id), + "group_1": [{"address": "0.0.0.0"}] + } + + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend_failure], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "0.0.0.0", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "0.0.0.0", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'0.0.0.0': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_failure) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't add group group_1. " + "Host 0.0.0.0:161 already exists in group group_2. 
Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_with_host_configured_multiple_times_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + new_group_ui_failure = { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_group_backend_failure = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "1.1.1.1"} + ] + } + + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend_failure], # call from HandleNewDevice.add_group_to_inventory + + # first iteration in HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + + # second iteration in HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + + # first iteration in HandleNewDevice.add_group_to_inventory + call({'address': "1.1.1.1", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.1.1.1", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'1.1.1.1': {"$exists": True}}), # call from HandleNewDevice.add_single_host + + # second iteration in HandleNewDevice.add_group_to_inventory + call({'address': "1.1.1.1", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.1.1.1", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'1.1.1.1': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_failure) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't add group group_1. " + "Device 1.1.1.1:161 was configured multiple times in this group. 
" + "Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_without_configuration(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group group_1 doesn't exist in the configuration. " + "Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_without_configuration_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [new_group_backend_inventory()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group group_1 doesn't exist in the configuration. 
Record was not added."} + + + +# TEST UPDATING A GROUP + +ui_edited_inventory_group = lambda : { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +edited_inventory_group = lambda : { + "address": "group_1", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +backend_inventory_existing_edit_group = lambda : { + "_id": ObjectId(common_id), + "address": "group_1", + "port": 161, + "version": "2", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +backend_existing_edit_group = lambda : { + "_id": ObjectId(common_id), + "group_1": [{"address": "1.1.1.1"}] +} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_without_changing_name_success(m_find, m_insert, m_update, m_delete, client): + + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [backend_inventory_existing_edit_group()], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_1": []}], # call from HandleNewDevice.edit_group_in_inventory + [backend_inventory_existing_edit_group()] # call from HandleNewDevice.edit_group_in_inventory + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_1": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + call({"_id": ObjectId(common_id)}) # call from HandleNewDevice.edit_group_in_inventory + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_edited_inventory_group()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": edited_inventory_group()}) + assert not m_insert.called + assert not m_delete.called + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_with_changing_name_success(m_find, m_insert, m_update, m_delete, client): + + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + new_name_group_ui = { + "inventoryType": "Group", + "address": "group_2", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": 
"1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_name_group = { + "address": "group_2", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + + second_group_backend = { + "_id": ObjectId("19E121BD031284F3CE845B72"), + "group_2": [] + } + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_2": []}], # call from HandleNewDevice.edit_group_in_inventory + [backend_inventory_existing_edit_group()], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [second_group_backend] # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_2", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_2", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_2", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_2", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({"group_2": {"$exists": 1}}) # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/update/{common_id}", json=new_name_group_ui) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert m_insert.call_args == call(new_name_group) + assert not m_delete.called + assert response.json == {"message": "Group name was edited which resulted in deleting the old group and creating new " \ + "one at the end of the list."} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_to_already_configured_failure(m_find, m_insert, m_update, m_delete, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + new_name_group_ui = { + "inventoryType": "Group", + "address": "group_2", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + inventory_existing_other_group = { + "_id": ObjectId("83EE0BCBA668527E7106E4F5"), + "address": "group_2", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + } + + m_find.side_effect = [ + 
[backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [inventory_existing_other_group], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_2": []}], # call from HandleNewDevice.edit_group_in_inventory + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_2", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_2", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + ] + + response = client.post(f"/inventory/update/{common_id}", json=new_name_group_ui) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group with name group_2 already exists. Record was not edited."} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_to_other_group_with_host_already_configured_failure(m_find, m_insert, m_update, m_delete, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + new_group_ui_failure = { + "inventoryType": "Group", + "address": "group_3", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_group_backend_failure = { + "_id": ObjectId(common_id), + "group_3": [{"address": "0.0.0.0"}] + } + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_3": []}], # call from HandleNewDevice.edit_group_in_inventory + [backend_inventory_existing_edit_group()], # call from HandleNewDevice.edit_group_in_inventory + + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend_failure], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_3", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_3", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_3": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + 
call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.edit_group_in_inventory + + call({'address': "group_3", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_3", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_3': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "0.0.0.0", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "0.0.0.0", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'0.0.0.0': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=new_group_ui_failure) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't add group group_3. " + "Host 0.0.0.0:161 already exists in group group_2. Record was not added."} + + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_host_or_host_to_group_failure(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Group" + + ui_edit_group_to_host = { + "inventoryType": "Host", + "address": "1.1.1.1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_edit_group_to_host) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't edit single host to the group or group to the single host"} + + m_get_inventory_type.return_value = "Host" + backend_edit_host_to_group = { + "_id": ObjectId(common_id), + "address": "1.1.1.1", + "port": 161, + "version": "2", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + + ui_edit_group_to_host2 = { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_find.side_effect = [ + [backend_edit_host_to_group], # call from HandleNewDevice.update_inventory_record + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.update_inventory_record + ] + + response = client.post(f"/inventory/update/{common_id}", 
json=ui_edit_group_to_host2) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't edit single host to the group or group to the single host"} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_delete_inventory_record(m_find, m_update, client): + m_update.return_value = None + m_find.return_value = [{ + "_id": ObjectId(common_id), + "address": "group_1", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + }] + response = client.post(f"/inventory/delete/{common_id}") + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert response.json == {"message": f"group_1 was deleted."} diff --git a/backend/tests/ui_handling/post_endpoints/test_post_profiles.py b/backend/tests/ui_handling/post_endpoints/test_post_profiles.py new file mode 100644 index 0000000..e2d1388 --- /dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_profiles.py @@ -0,0 +1,274 @@ +from unittest import mock +from unittest.mock import call +from bson import ObjectId + + +# TEST ADDING PROFILE +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.insert_one") +def test_add_profile_record_success(m_insert, m_find, client): + m_insert.return_value = None + m_find.return_value = [] + ui_prof = { + "profileName": "profile_1", + "frequency": 10, + "conditions": { + "condition": "standard", + "field": "", + "patterns": None + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}] + } + backend_prof = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + response = client.post("/profiles/add", json=ui_prof) + assert m_find.call_args == call({"profile_1": {"$exists": True}}) + assert m_insert.call_args == call(backend_prof) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.insert_one") +def test_add_profile_record_failure(m_insert, m_find, client): + ui_prof = { + "profileName": "profile_1", + "frequency": 10, + "conditions": { + "condition": "None", + "field": "", + "patterns": None + }, + "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, + {"family": "IF-MIB", "category": "", "index": ""}, + {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + } + backend_prof = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + m_insert.return_value = None + m_find.return_value = [backend_prof] + + response = client.post("/profiles/add", json=ui_prof) + assert m_find.call_args == call({"profile_1": {"$exists": True}}) + assert not m_insert.called + assert response.json == {"message": f"Profile with name profile_1 already exists. 
Profile was not added."} + + +# TEST DELETING PROFILE +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +def test_delete_profile_record(m_update, m_delete, m_find, client): + common_id = "635916b2c8cb7a15f28af40a" + profile = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + backend_inventory = { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_1;profile_2", + "smart_profiles": False, + "delete": False + } + + backend_inventory_update = { + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_2", + "smart_profiles": False, + "delete": False + } + + m_find.side_effect = [ + [profile], + [backend_inventory] + ] + m_delete.return_value = None + m_update.return_value = None + + response = client.post(f'/profiles/delete/{common_id}') + + calls = [call({'_id': ObjectId(common_id)}, {"_id": 0}), call({"profiles": {"$regex": '.*profile_1.*'}, "delete": False})] + m_find.assert_has_calls(calls) + assert m_delete.call_args == call({"_id": ObjectId(common_id)}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}) + assert response.json == {"message": f"Profile profile_1 was deleted. It was also deleted from some inventory records."} + + +# TEST UPDATING PROFILE +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_profile_record_no_name_change_success(m_find, m_update, client): + common_id = "635916b2c8cb7a15f28af40a" + ui_prof_1_new = { + "profileName": "profile_1", + "frequency": 20, + "conditions": { + "condition": "smart", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}] + } + + backend_prof_1_old = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + backend_prof_1_new = { + "profile_1": { + "frequency": 20, + "condition": {"type": "field", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, + "varBinds": [["IF-MIB", "ifInDiscards", "1"]] + } + } + + m_find.side_effect = [[], [backend_prof_1_old]] + m_update.return_value = None + + response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) + assert m_update.call_args == call({'_id': ObjectId(common_id)}, + {"$set": {"profile_1": backend_prof_1_new["profile_1"]}}) + assert response.json == "success" + assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_profile_record_with_name_change_success(m_find, m_update, client): + common_id = "635916b2c8cb7a15f28af40a" + + backend_prof_1_old = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + ui_prof_1_new = { + "profileName": "profile_1_edit", + "frequency": 20, + "conditions": { + "condition": 
"smart", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}] + } + + backend_prof_1_new = { + "profile_1_edit": { + "frequency": 20, + "condition": {"type": "field", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, + "varBinds": [["IF-MIB", "ifInDiscards", "1"]] + } + } + + backend_inventory = { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_1;profile_2", + "smart_profiles": False, + "delete": False + } + + backend_inventory_update = { + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_1_edit;profile_2", + "smart_profiles": False, + "delete": False + } + + m_find.side_effect = [ + [], + [backend_prof_1_old], + [backend_inventory] + ] + m_update.return_value = None + + calls_find = [call({f"profile_1_edit": {"$exists": True}, "_id": {"$ne": ObjectId(common_id)}}), + call({'_id': ObjectId(common_id)}, {"_id": 0}), + call({"profiles": {"$regex": '.*profile_1.*'}, "delete": False})] + + calls_update = [call({'_id': ObjectId(common_id)}, {"$rename": {"profile_1": "profile_1_edit"}}), + call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}), + call({'_id': ObjectId(common_id)}, + {"$set": {"profile_1_edit": backend_prof_1_new["profile_1_edit"]}})] + + response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) + + m_find.assert_has_calls(calls_find) + m_update.assert_has_calls(calls_update) + assert response.json == {"message": f"If profile_1 was used in some records in the inventory," + f" it was updated to profile_1_edit"} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_profile_record_failure(m_find, m_update, client): + common_id = "635916b2c8cb7a15f28af40a" + ui_prof_1_new = { + "profileName": "profile_1", + "frequency": 20, + "conditions": { + "condition": "field", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + }, + "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}] + } + + backend_prof_1_old = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + m_find.return_value = [backend_prof_1_old] + response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) + assert not m_update.called + assert response.json == {"message": "Profile with name profile_1 already exists. 
Profile was not edited."} \ No newline at end of file diff --git a/backend/tests/ui_handling/test_post_endpoints.py b/backend/tests/ui_handling/test_post_endpoints.py deleted file mode 100644 index d8157ca..0000000 --- a/backend/tests/ui_handling/test_post_endpoints.py +++ /dev/null @@ -1,998 +0,0 @@ -from unittest import mock -from unittest.mock import call, Mock -from bson import ObjectId - - -@mock.patch("pymongo.collection.Collection.insert_one") -def test_add_profile_record(m_client, client): - m_client.return_value = None - ui_prof = { - "profileName": "profile_1", - "frequency": 10, - "conditions": { - "condition": "None", - "field": "", - "patterns": None - }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] - } - - backend_prof = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - response = client.post("/profiles/add", json=ui_prof) - - assert m_client.call_args == call(backend_prof) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.find") -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -def test_delete_profile_record(m_update, m_delete, m_find, client): - common_id = "635916b2c8cb7a15f28af40a" - profile = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - backend_inventory = { - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_1;profile_2", - "smart_profiles": False, - "delete": False - } - - backend_inventory_update = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_2", - "smart_profiles": False, - "delete": False - } - - m_find.side_effect = [ - [profile], - [backend_inventory] - ] - m_delete.return_value = None - m_update.return_value = None - - response = client.post(f'/profiles/delete/{common_id}') - - calls = [call({'_id': ObjectId(common_id)}, {"_id": 0}), call({"profiles": {"$regex": '.*profile_1.*'}})] - m_find.assert_has_calls(calls) - assert m_delete.call_args == call({"_id": ObjectId(common_id)}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}) - assert response.json == {"message": f"If profile_1 was used in some records in the inventory," - f" those records were updated"} - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_profile_record_no_name_change(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - ui_prof_1_new = { - "profileName": "profile_1", - "frequency": 20, - "conditions": { - "condition": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] - }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}] - } - - backend_prof_1_old = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - backend_prof_1_new = { - "profile_1": { - "frequency": 
20, - "condition": {"type": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, - "varBinds": [["IF-MIB", "ifInDiscards", 1]] - } - } - - m_find.return_value = [backend_prof_1_old] - m_update.return_value = None - - response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) - assert m_update.call_args == call({'_id': ObjectId(common_id)}, - {"$set": {"profile_1": backend_prof_1_new["profile_1"]}}) - assert response.json == "success" - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_profile_record_with_name_change(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - backend_prof_1_old = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - ui_prof_1_new = { - "profileName": "profile_1_edit", - "frequency": 20, - "conditions": { - "condition": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] - }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}] - } - - backend_prof_1_new = { - "profile_1_edit": { - "frequency": 20, - "condition": {"type": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, - "varBinds": [["IF-MIB", "ifInDiscards", 1]] - } - } - - backend_inventory = { - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_1;profile_2", - "smart_profiles": False, - "delete": False - } - - backend_inventory_update = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_1_edit;profile_2", - "smart_profiles": False, - "delete": False - } - - m_find.side_effect = [ - [backend_prof_1_old], - [backend_inventory] - ] - m_update.return_value = None - - calls_find = [call({'_id': ObjectId(common_id)}, {"_id": 0}), - call({"profiles": {"$regex": '.*profile_1.*'}})] - - calls_update = [call({'_id': ObjectId(common_id)}, {"$rename": {"profile_1": "profile_1_edit"}}), - call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}), - call({'_id': ObjectId(common_id)}, - {"$set": {"profile_1_edit": backend_prof_1_new["profile_1_edit"]}})] - - response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) - - m_find.assert_has_calls(calls_find) - m_update.assert_has_calls(calls_update) - assert response.json == {"message": f"If profile_1 was used in some records in the inventory," - f" it was updated to profile_1_edit"} - - -@mock.patch("pymongo.collection.Collection.insert_one") -def test_add_group_record(m_insert, client): - ui_group = { - "groupName": "group_1" - } - - backend_group = { - "group_1": [] - } - - response = client.post(f"/groups/add", json=ui_group) - m_insert.return_value = None - assert m_insert.call_args == call(backend_group) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_group(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_group_new = { - "_id": common_id, - "groupName": "group_1_edit" - } - 
- backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - ] - } - - calls_update = [ - call({'_id': ObjectId(common_id)}, {"$rename": {"group_1": "group_1_edit"}}), - call({"address": "group_1"}, {"$set": {"address": 'group_1_edit'}}) - ] - - m_find.side_effect = [ - [], - [backend_group_old] - ] - - response = client.post(f"/groups/update/{common_id}", json=ui_group_new) - m_update.assert_has_calls(calls_update) - assert response.json == {"message": "group_1 was also renamed to group_1_edit in the inventory"} - - -@mock.patch("pymongo.collection.Collection.find") -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.MongoClient.start_session") -def test_delete_group_and_devices(m_session, m_update, m_delete, m_find, client): - common_id = "635916b2c8cb7a15f28af40a" - backend_group = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - ] - } - m_session.return_value.__enter__.return_value.start_transaction.__enter__ = Mock() - - m_find.return_value = [backend_group] - m_delete.return_value = None - m_update.return_value = None - - response = client.post(f"/groups/delete/{common_id}") - assert m_find.call_args == call({'_id': ObjectId(common_id)}) - assert m_delete.call_args == call({'_id': ObjectId(common_id)}) - assert m_update.call_args == call({"address": "group_1"}, {"$set": {"delete": True}}) - assert response.json == {"message": "If group_1 was configured in the inventory, it was deleted from there"} - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_device_to_group(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_group_device_new = { - "address": "2.2.2.2", - "port": "", - "version": "3", - "community": "", - "secret": "snmpv3", - "securityEngine": "", - "groupId": str(common_id) - } - - backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - ] - } - - backend_group_new = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - {"address": "2.2.2.2", "version": "3", "secret": "snmpv3"} - ] - } - - m_find.return_value = [backend_group_old] - m_update.return_value = None - - response = client.post(f"/devices/add", json=ui_group_device_new) - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_device_from_group(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_group_device_update = { - "address": "2.2.2.3", - "port": "1161", - "version": "2c", - "community": "public", - "secret": "", - "securityEngine": "1112233aabbccdee", - "groupId": str(common_id) - } - - backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "2.2.2.2"}, - {"address": "3.3.3.3"} - ] - } - - backend_group_new = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", - "security_engine": "1112233aabbccdee"}, - {"address": "3.3.3.3"} - ] - } - - m_find.return_value = [backend_group_old] - m_update.return_value = None - - response = client.post(f"/devices/update/{common_id}-1", 
json=ui_group_device_update) - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_delete_device_from_group_record(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", - "security_engine": "1112233aabbccdee"}, - {"address": "3.3.3.3"} - ] - } - - backend_group_new1 = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "3.3.3.3"} - ] - } - - backend_group_new2 = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "3.3.3.3"} - ] - } - - m_find.return_value = [backend_group_old] - m_update.return_value = None - response = client.post(f"/devices/delete/{common_id}-1") - - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new1}) - assert response.json == "success" - - m_find.return_value = [backend_group_new1] - response = client.post(f"/devices/delete/{common_id}-0") - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new2}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_single_host_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - calls_find = [ - call({'address': "11.0.78.114", 'port': 161, "delete": False}), - call({'address': "11.0.78.114", 'port': 161, "delete": True}) - ] - - m_find.side_effect = [[], []] - # Test adding a new device, when there was no device with the same address and port with deleted flag set to True - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert not m_delete.called - assert response.json == "success" - - m_find.side_effect = [ - [], - [{ - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": True - }] - ] - - # Test adding a new device, when there was a device with 
the same address and port with deleted flag set to True - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert m_delete.call_args == call({"_id": ObjectId(common_id)}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_single_host_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - calls_find = [ - call({'address': "11.0.78.114", 'port': 161, "delete": False}), - call({'address': "11.0.78.114", 'port': 161, "delete": True}) - ] - - m_find.side_effect = [[], []] - - # Test editing a device with changing its address and port - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - m_find.side_effect = [ - [{ - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": True - }], - [] - ] - - # Test editing a device without changing its address and port - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_single_host_failed(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [{ - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 1161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": 
"prof1", - "smart_profiles": False, - "delete": False - }], - [] - ] - - calls_find = [ - call({'address': "11.0.78.114", 'port': 1161, "delete": False}), - call({'address': "11.0.78.114", 'port': 1161, "delete": True}) - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for 11.0.78.114:1161 already exists. Record was not added."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_single_host_failed(m_find, m_insert, m_update, m_delete, client): - edit_id = "635916b2c8cb7a15f28af40a" - existing_id = "035916b2c8cb7a15f28af40b" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [{ - "_id": ObjectId(existing_id), - "address": "11.0.78.114", - "port": 1161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": False - }], - [] - ] - - calls_find = [ - call({'address': "11.0.78.114", 'port': 1161, "delete": False}), - call({'address': "11.0.78.114", 'port': 1161, "delete": True}) - ] - - response = client.post(f"/inventory/update/{edit_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for 11.0.78.114:1161 already exists. 
Record was not edited."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_group_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "group_1", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "group_1", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [], - [], - [{ - "_id": ObjectId(common_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}), - call({'group_1': {"$exists": 1}}) - ] - - # Test adding a new group, when there was no group with the same name with deleted flag set to True - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert not m_delete.called - assert response.json == "success" - - # Test adding a new group, when there was a group with the same name with deleted flag set to True - m_find.side_effect = [ - [], - [{ - "_id": ObjectId(common_id), - "address": "group_1", - "port": 1161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": True - }], - [{ - "_id": ObjectId(common_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert m_delete.call_args == call({"_id": ObjectId(common_id)}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_group_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "group_1", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "group_1", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - 
m_delete.return_value = None - - m_find.side_effect = [[], [], [{"_id": ObjectId(common_id), "group_1": [{"address": "1.2.3.4"}]}]] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}), - call({'group_1': {"$exists": 1}}) - ] - - # Test editing a group with changing group name - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - m_find.side_effect = [[{ - "_id": ObjectId(common_id), - "address": "group_1", - "port": 1161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": True - }], - [], - [{"_id": ObjectId(common_id), "group_1": [{"address": "1.2.3.4"}]}]] - - # Test editing a group without changing group name - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_group_failed(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "group_1", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - # Test adding a new group, when the same group is already in the inventory. - m_find.side_effect = [ - [{ - "_id": ObjectId(common_id), - "address": "group_1", - "port": 161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": False - }], - [], - [{ - "_id": ObjectId(common_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}), - call({'group_1': {"$exists": 1}}) - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for group_1 already exists. Record was not added."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - # Test adding a new group, when there is no group configured. - m_find.side_effect = [ - [], - [], - [] - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"There is no group group_1 configured. 
Record was not added."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_group_failed(m_find, m_insert, m_update, m_delete, client): - edit_id = "635916b2c8cb7a15f28af40a" - existing_id = "035916b2c8cb7a15f28af40b" - - ui_inventory_new = { - "address": "group_1", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [{ - "_id": ObjectId(existing_id), - "address": "group_1", - "port": 1161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": False - }], - [], - [{ - "_id": ObjectId(existing_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}) - ] - - response = client.post(f"/inventory/update/{edit_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for group_1 already exists. Record was not edited."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - m_find.side_effect = [ - [], - [], - [] - ] - - # Test editing a group, when there is no group configured. - response = client.post(f"/inventory/update/{edit_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"There is no group group_1 configured. 
Record was not edited."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.update_one") -def test_delete_inventory_record(m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - m_update.return_value = None - response = client.post(f"/inventory/delete/{common_id}") - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) - assert response.json == "success" diff --git a/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml b/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml new file mode 100644 index 0000000..6e18d10 --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml @@ -0,0 +1,4 @@ +inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;in_profile,t,f + group1,1161,2c,public,,,1800,single_metric;multiple_conditions,f,f diff --git a/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml b/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml new file mode 100644 index 0000000..4017b3c --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml @@ -0,0 +1,12 @@ +group1: +- address: 52.14.243.157 + port: 1163 +- address: 20.14.10.0 + port: 161 +group2: +- address: 0.10.20.30 +- address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + security_engine: aabbccdd1234 diff --git a/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml b/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml new file mode 100644 index 0000000..151a2ad --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml @@ -0,0 +1,51 @@ +single_metric: + frequency: 60 + varBinds: + - ['IF-MIB', 'ifMtu', '1'] +small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] +gt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + - ['IF-MIB', 'ifOutErrors'] + - ['IF-MIB', 'ifOutOctets'] \ No newline at end of file diff --git a/backend/tests/yamls_for_tests/reference_files/values.yaml b/backend/tests/yamls_for_tests/reference_files/values.yaml new file mode 100644 index 0000000..8bc3a57 --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/values.yaml @@ -0,0 +1,161 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: 
mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + group1: + - address: 52.14.243.157 + port: 1163 + - address: 20.14.10.0 + port: 161 + group2: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + security_engine: aabbccdd1234 + profiles: | + single_metric: + frequency: 60 + varBinds: + - ['IF-MIB', 'ifMtu', '1'] + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + gt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + - ['IF-MIB', 'ifOutErrors'] + - ['IF-MIB', 'ifOutOctets'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;in_profile,t,f + group1,1161,2c,public,,,1800,single_metric;multiple_conditions,f,f +# group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/backend/tests/yamls_for_tests/values_test/.gitignore b/backend/tests/yamls_for_tests/values_test/.gitignore new file mode 100644 index 0000000..0372f75 --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/.gitignore @@ -0,0 +1 @@ +sc4snmp_ui_*.yaml 
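# Sketch (not part of this diff): the reference files added above
# (poller_inventory.yaml, scheduler_groups.yaml, scheduler_profiles.yaml,
# values.yaml) together with the values_test/.gitignore entry for
# sc4snmp_ui_*.yaml suggest that the backend tests render per-section YAML
# files and compare them against these references. A minimal pytest-style
# sketch of such a comparison, assuming PyYAML; the directory constants, the
# helper name, and the sc4snmp_ui_<section>.yaml naming are assumptions, not
# code taken from this repository.
from pathlib import Path

import yaml

REFERENCE_DIR = Path("backend/tests/yamls_for_tests/reference_files")
GENERATED_DIR = Path("backend/tests/yamls_for_tests/values_test")

def assert_section_matches_reference(section: str, reference_name: str) -> None:
    # Parse both documents so the comparison ignores formatting differences
    # (indentation, quoting, rendered key order) and checks the data only.
    generated = yaml.safe_load((GENERATED_DIR / f"sc4snmp_ui_{section}.yaml").read_text())
    reference = yaml.safe_load((REFERENCE_DIR / reference_name).read_text())
    assert generated == reference

# Example: assert_section_matches_reference("groups", "scheduler_groups.yaml")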
diff --git a/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml b/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml new file mode 100644 index 0000000..fc5bebf --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml @@ -0,0 +1,139 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + some_group: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + - address: 10.1.3.157 + port: 1165 + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + conditional_profile_greater_than: + frequency: 100 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;conditional_profile_greater_than,t,f + some_group,1161,2c,public,,,1800,single_metric;in_profile,f,t + 156.0.10.91,161,2c,public,,,1800,conditional_profile_greater_than,t,t +# group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + 
signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/backend/tests/yamls_for_tests/values_test/values.yaml b/backend/tests/yamls_for_tests/values_test/values.yaml new file mode 100644 index 0000000..6b88a85 --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/values.yaml @@ -0,0 +1,139 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + some_group: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + - address: 10.1.3.157 + port: 1165 + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + conditional_profile_greater_than: + frequency: 100 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;conditional_profile_greater_than,t,f + some_group,1161,2c,public,,,1800,single_metric;in_profile,f,t + 156.0.10.91,161,2c,public,,,1800,conditional_profile_greater_than,t,t +# 
group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/frontend/Dockerfile b/frontend/Dockerfile index 03c6477..735ce7b 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -3,6 +3,7 @@ WORKDIR /frontend ENV PATH /frontend/node_modules/.bin:$PATH COPY package.json yarn.lock lerna.json ./ COPY ./packages ./packages +RUN apk add --update python3 make g++ && rm -rf /var/cache/apk/* RUN yarn install RUN yarn run build RUN apk add curl diff --git a/frontend/lerna.json b/frontend/lerna.json index 0159799..21878c4 100644 --- a/frontend/lerna.json +++ b/frontend/lerna.json @@ -1,6 +1,6 @@ { "lerna": "^6.6.2", - "version": "0.0.1", + "version": "1.0.0-beta.18", "command": { "publish": { "ignoreChanges": ["*.md"] diff --git a/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html b/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html index 1ff433a..1e49b85 100644 --- a/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html +++ b/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html @@ -5,7 +5,7 @@ - Manager Demo App + SC4SNMP Manager diff --git a/frontend/packages/manager/demo/standalone/index.html b/frontend/packages/manager/demo/standalone/index.html index 8912646..dc7ac45 100644 --- a/frontend/packages/manager/demo/standalone/index.html +++ b/frontend/packages/manager/demo/standalone/index.html @@ -3,7 +3,7 @@ - Manager + SC4SNMP Manager diff --git a/frontend/packages/manager/jest.config.js b/frontend/packages/manager/jest.config.js index af03d11..a040538 100644 --- a/frontend/packages/manager/jest.config.js +++ b/frontend/packages/manager/jest.config.js @@ -1,3 +1,7 @@ module.exports = { - testMatch: ['**/*.unit.[jt]s?(x)'], + testMatch: ['**/*.test.[jt]s?(x)'], + testEnvironment: "jsdom", + setupFilesAfterEnv: [ + "@testing-library/jest-dom/extend-expect" + ] }; diff --git a/frontend/packages/manager/package.json b/frontend/packages/manager/package.json index 98dc49b..bf67755 100644 --- a/frontend/packages/manager/package.json +++ b/frontend/packages/manager/package.json @@ -1,6 +1,6 @@ { "name": "@splunk/manager", - "version": "0.0.1", + "version": "1.0.0-beta.18", "license": "UNLICENSED", "scripts": { "build": "NODE_ENV=production webpack --bail --config demo/webpack.standalone.config.js", @@ -14,7 +14,7 @@ "start:app": "webpack --watch --config demo/webpack.splunkapp.config.js", "start:demo": "webpack-dev-server --config demo/webpack.standalone.config.js --port ${DEMO_PORT-8080} --host 0.0.0.0", "stylelint": "stylelint \"src/**/*.{js,jsx}\" --config stylelint.config.js", - "test": "jest", + "test": "DEBUG_PRINT_LIMIT=1000000 jest", "test:ci": "JEST_JUNIT_OUTPUT_DIR=./test-reports JEST_JUNIT_OUTPUT_NAME=unit-results.xml JEST_JUNIT_CLASSNAME=unit yarn run test --ci --reporters=default jest-junit --coverage --coverageDirectory=coverage_report/coverage_maps_unit --coverageReporters=cobertura", "test:watch": "jest --watch" }, @@ -26,7 +26,9 @@ "css-loader": "^6.7.1", "react-router-dom": "6", "scriptjs": "^2.5.9", - "style-loader": "^3.3.1" + "style-loader": "^3.3.1", + "history": "5.3.0", + "qs": "6.11.2" }, "devDependencies": { "@babel/core": "^7.2.0", @@ -35,6 +37,11 @@ "@splunk/splunk-utils": "^2.3.4", "@splunk/stylelint-config": "^4.0.0", "@splunk/webpack-configs": "^5.0.0", + 
"@testing-library/react": "12.1.2", + "@testing-library/dom": "9.3.1", + "@testing-library/jest-dom": "5.16.5", + "@jest/globals": "^29.6.1", + "jest-environment-jsdom": "^29.6.1", "babel-eslint": "^10.1.0", "babel-loader": "^8.0.4", "chai": "^3.5.0", diff --git a/frontend/packages/manager/src/Manager.jsx b/frontend/packages/manager/src/Manager.jsx index 1e0f518..c8b2fe9 100644 --- a/frontend/packages/manager/src/Manager.jsx +++ b/frontend/packages/manager/src/Manager.jsx @@ -1,12 +1,9 @@ -import React, {useCallback, useContext, useState} from 'react'; -import { Link, Route, Routes, Switch } from 'react-router-dom'; +import React from 'react'; import ErrorsModal from "./components/ErrorsModal"; import Menu from "./components/menu_header/Menu"; import Header from "./components/menu_header/Header"; import TabPanels from "./components/menu_header/TabPanels"; -import MenuHeaderContxt from './store/menu-header-contxt'; - import { ButtonsContextProvider } from "./store/buttons-contx"; import { ErrorsModalContextProvider } from "./store/errors-modal-contxt"; @@ -18,7 +15,7 @@ import { GroupContextProvider } from "./store/group-contxt"; import { FontStyles } from "./styles/FontsStyles"; function Uncontrolled() { - const MenuCtx = useContext(MenuHeaderContxt); + return ( @@ -33,7 +30,6 @@ function Uncontrolled() { - diff --git a/frontend/packages/manager/src/components/ButtonsModal.jsx b/frontend/packages/manager/src/components/ButtonsModal.jsx deleted file mode 100644 index 575712a..0000000 --- a/frontend/packages/manager/src/components/ButtonsModal.jsx +++ /dev/null @@ -1,34 +0,0 @@ -import React, {useState, useRef, useContext} from 'react'; -import Button from '@splunk/react-ui/Button'; -import ControlGroup from '@splunk/react-ui/ControlGroup'; -import Modal from '@splunk/react-ui/Modal'; -import P from '@splunk/react-ui/Paragraph'; -import Select from '@splunk/react-ui/Select'; -import Multiselect from '@splunk/react-ui/Multiselect'; -import Text from '@splunk/react-ui/Text'; -import ButtonsContext from "../store/buttons-contx"; - -function ButtonsModal(props) { - const BtnCtx = useContext(ButtonsContext); - - const handleRequestClose = () => { - BtnCtx.setButtonsOpen(false); - }; - - return ( -
-        {/* JSX stripped during extraction: the deleted ButtonsModal rendered a Modal bound to BtnCtx.buttonsOpen with a close Button */}
- ); -} - -export default ButtonsModal; diff --git a/frontend/packages/manager/src/components/DeleteModal.jsx b/frontend/packages/manager/src/components/DeleteModal.jsx index d45a03f..10eaf4d 100644 --- a/frontend/packages/manager/src/components/DeleteModal.jsx +++ b/frontend/packages/manager/src/components/DeleteModal.jsx @@ -1,8 +1,8 @@ -import React, { useCallback, useState, useRef, useContext } from 'react'; +import React, { useCallback, useState, useContext } from 'react'; import Button from '@splunk/react-ui/Button'; import Modal from '@splunk/react-ui/Modal'; -import axios from "axios"; import P from '@splunk/react-ui/Paragraph'; +import Message from "@splunk/react-ui/Message"; import ButtonsContext from "../store/buttons-contx"; function DeleteModal(props) { @@ -26,10 +26,14 @@ function DeleteModal(props) {

                    <P>Are you sure you want to delete {props.deleteName} ?</P>

+                    {("customWarning" in props && props["customWarning"] != null) ?
+                        (<Message>
+                            {props["customWarning"]}
+                        </Message>) : null}
-
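# Sketch (not part of this diff): the single-host add-inventory flow that the
# test_add_inventory_record_single_host_* cases above pin down, reconstructed
# from the mocked pymongo calls (find with delete=False, find with delete=True,
# delete_one, insert_one). Function and parameter names are assumptions; the
# real handler lives in the Flask backend and is not shown in this patch.
def add_single_host_record(collection, record):
    # Reject the insert when an active record for the same address:port exists;
    # the tests expect a 400 with this exact message in that case.
    query = {"address": record["address"], "port": record["port"]}
    if list(collection.find({**query, "delete": False})):
        return {"message": f"Inventory record for {record['address']}:{record['port']} "
                           "already exists. Record was not added."}, 400
    # A soft-deleted twin (delete=True) is purged before re-inserting, which is
    # why the second test case asserts delete_one followed by insert_one.
    soft_deleted = list(collection.find({**query, "delete": True}))
    if soft_deleted:
        collection.delete_one({"_id": soft_deleted[0]["_id"]})
    collection.insert_one({**record, "delete": False})
    return "success", 200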