From f25ff6d296ee57cbecd971c887a97ba12e989c6d Mon Sep 17 00:00:00 2001
From: wojtekzyla <108660584+wojtekzyla@users.noreply.github.com>
Date: Wed, 4 Oct 2023 11:15:27 +0200
Subject: [PATCH] Develop (#50)

* fix: refactor routes in flask, refactor InventoryList.jsx, change delete button in delete modal to primary, after deleting anything always display confirmation, change label on the button in the inventory, add a unit to the polling frequency fix: display config from the inventory for the devices in groups fix: unfinished validation of new hosts and group fix: validation of new devices and groups fix: comment out obsolete tests - they will be updated while creating the test suite * fix: add permissions to the release job in workflow fix: combine frontend and backend build into one workflow, create release workflow fix: run frontend and backend builds in separate jobs fix: don't build PR images fix: update .releaserc * Update ci-build.yaml Don't push docker images in ci-build.yaml workflow Revert "Update ci-build.yaml" This reverts commit 2e57423c4aa4f0f1b71121992edc217c9c742c90. fix: change yarn network-timeout fix: Don't push docker images in ci-build.yaml workflow, don't update yarn network-timeout fix: install python in frontend docker image fix: install python3 on alpine linux fix: add make in frontend Dockerfile fix: rename workflow name to ci-build fix: frontend dockerfile install python2 fix: dockerfile change * fix: fix invalid escape sequence \s in .releaserc * fix: remove extra quotation mark in .releaserc * fix: update .releaserc * fix: update .releaserc * fix: fix matches in .releaserc * chore(release): 1.0.0-beta.1 # 1.0.0-beta.1 (2023-07-05) ### Bug Fixes * add collapsible rows in profiles ([23035bb](https://github.com/splunk/SC4SNMP-UI/commit/23035bbc155092d17b4bc95c3df0836fcec3649a)) * add gunicorn logger, add test directory, add requirements ([1ba2cad](https://github.com/splunk/SC4SNMP-UI/commit/1ba2cad93657474dca82b641a89b02c2ad8676eb)) * add permissions to the release job in workflow ([71cec71](https://github.com/splunk/SC4SNMP-UI/commit/71cec71df1b70077c03c1b0a0e7538824cbc0f22)) * add SC4SNMP_UI_backend directory in backend Dockerfile ([fa35651](https://github.com/splunk/SC4SNMP-UI/commit/fa3565196b15614c48b18cc82310e21b6bdb7ef6)) * add validation to check if community is provided if version is 1 or 2c ([816c8a8](https://github.com/splunk/SC4SNMP-UI/commit/816c8a8d983659f552fe5215da4a9e7ebcecbb9f)) * added comments and deleted profile validation context ([f2adcda](https://github.com/splunk/SC4SNMP-UI/commit/f2adcdaca0709a2f75582ba1f32011bb6d845f3b)) * added error style to inventory and groups forms while validating ([abbda32](https://github.com/splunk/SC4SNMP-UI/commit/abbda32acd5b3c345ffb07b7e69953c7c549f09f)) * added Proxima Nova font, finished inventory ([6d56473](https://github.com/splunk/SC4SNMP-UI/commit/6d564738e7409f9069ff725fd3a473680c695fa4)) * added requirements.txt ([d8eb307](https://github.com/splunk/SC4SNMP-UI/commit/d8eb307370125facbc3ea3cc75e8c016604ff62a)) * added semicolon ([6f336ba](https://github.com/splunk/SC4SNMP-UI/commit/6f336ba02b206630aaf2d2007c3fddace6cb9982)) * adding inventory fields without base and mandatory profiles ([2b28113](https://github.com/splunk/SC4SNMP-UI/commit/2b28113d4db43220261c57921823072651f07443)) * auto refresh state after deleting or adding profile ([6e1da8c](https://github.com/splunk/SC4SNMP-UI/commit/6e1da8c5091d92b0aed90d2a36b059ad5ae207d2)) * automatically add base and mandatory profiles to the new inventory, delete
inventory records ([7851aa7](https://github.com/splunk/SC4SNMP-UI/commit/7851aa7fc8d16c36276adef01fb47fead08b54b4)) * backend for delete and edit objects by ID instead of profile name ([87ef861](https://github.com/splunk/SC4SNMP-UI/commit/87ef86159f067f8fb77bf7fe9477c1cb088bf4f3)) * bolded header in the inventory modal ([9f9e6ad](https://github.com/splunk/SC4SNMP-UI/commit/9f9e6ade173afc1f079e81047c83d01623fa96cc)) * change backendHost to use it on local machine ([e6b11de](https://github.com/splunk/SC4SNMP-UI/commit/e6b11de4476b554d0e1c80021c42c65895832968)) * change docker image repository name ([053e6b7](https://github.com/splunk/SC4SNMP-UI/commit/053e6b7f43912c09015f3118ead83d51ab66bd0c)) * change modal name to Edit or Add depending on action ([4a01627](https://github.com/splunk/SC4SNMP-UI/commit/4a01627a37027d40cdc4c6f9d0b5f9fb090ed813)) * change order of menu tabs, change 'IP address/Group' field from group devices configuration to 'IP address' ([156fc6d](https://github.com/splunk/SC4SNMP-UI/commit/156fc6d43f02166140543373cc3920bea227f77d)) * change organisation of files ([c129eb1](https://github.com/splunk/SC4SNMP-UI/commit/c129eb1fe064eb7053cf3283d8eb54946d0556c9)) * change url link of inventory to the updated version of API endpoints ([b8bcc77](https://github.com/splunk/SC4SNMP-UI/commit/b8bcc77bba4e76b46132c6a0af0a963378756893)) * changed the way of accessing collection in endpoints, changed how updates of profiles are made, created check_if_inventory_can_be_added, changed the way of deleting group from inventory when the group was deleted ([cf9f482](https://github.com/splunk/SC4SNMP-UI/commit/cf9f4827c9b6b422e18495b0cbce0cf648adca39)) * changed variable names ([a810fd6](https://github.com/splunk/SC4SNMP-UI/commit/a810fd68983c766074e18f7766b005a460767263)) * changing variables including 'Id' in the name from 'myNameID' to 'myNameId' ([095b49c](https://github.com/splunk/SC4SNMP-UI/commit/095b49cef4dfd4fe062d7a64c333ecb2c856e1fa)) * condition validation and partially working varBinds validation ([28ca072](https://github.com/splunk/SC4SNMP-UI/commit/28ca0720516faa9a77bfc1e3cd2a62ca672233ba)) * converting profiles between two formats ([6befac7](https://github.com/splunk/SC4SNMP-UI/commit/6befac7f209cc1a445392336cef115f8056a1d94)) * create modal for displaying messages from backend ([00138ff](https://github.com/splunk/SC4SNMP-UI/commit/00138ffc9e2091e334bf993a964334c13226e297)) * create new groups column in groups UI ([1bb9e10](https://github.com/splunk/SC4SNMP-UI/commit/1bb9e10fc6d914db11c5916cf2aeceed1e894de6)) * cross-origin ([f1946a5](https://github.com/splunk/SC4SNMP-UI/commit/f1946a529ec646113ee0d0412040db25b67bac9c)) * delete and edit objects by ID instead of profile name ([75705b7](https://github.com/splunk/SC4SNMP-UI/commit/75705b7a6d2de78978b7e21ec531a39fbefe6741)) * delete and update endpoints ([c67b2bc](https://github.com/splunk/SC4SNMP-UI/commit/c67b2bc6abce7dc1b6e903b0081af09dd7d286fe)) * delete inventory.py file ([bf557ba](https://github.com/splunk/SC4SNMP-UI/commit/bf557baf1ae98ad3c0946f96c4f84840db0e9b46)) * delete unnecessary logger ([82da626](https://github.com/splunk/SC4SNMP-UI/commit/82da626b0928a7e3d7cf81f82bf37b5076859094)) * deleted inventory_old.py ([ef747dd](https://github.com/splunk/SC4SNMP-UI/commit/ef747dd0c8ac2de67c08fc3ccec5257c36b77760)) * deleting profiles ([24c3417](https://github.com/splunk/SC4SNMP-UI/commit/24c34178e9add3b9604c0ee681806e062af35f99)) * display messages from backend when updating inventory and updating group
([dfe6978](https://github.com/splunk/SC4SNMP-UI/commit/dfe6978938739e4af69f4a60eb3ec49cdb1b1301)) * display messages from backend when updating or deleting profiles ([d917657](https://github.com/splunk/SC4SNMP-UI/commit/d91765752a867f788521c652021da1c42845cc42)) * don't change values after update window is closed without submitting changes ([4cf7249](https://github.com/splunk/SC4SNMP-UI/commit/4cf7249ee2af907cc3ed78ffb162c6d6bdcac9ba)) * edit appropriate FormRows.Row in varBinds and Conditions ([0253391](https://github.com/splunk/SC4SNMP-UI/commit/025339150adafd954e603cd182c901cfc1ad16c5)) * edit inventory records with groups ([67cbbc5](https://github.com/splunk/SC4SNMP-UI/commit/67cbbc5396d4ff0c0a218be8234000eb40b1eb37)) * editing and deleting inventory records ([ea80642](https://github.com/splunk/SC4SNMP-UI/commit/ea80642b18b29b21eed79fede82c7d742ea94855)) * editing and deleting inventory records - backend ([bf463e8](https://github.com/splunk/SC4SNMP-UI/commit/bf463e8eb6b3bfa5f26a5083408148e2436e3752)) * error message for security engine - fixed grammar error ([33b1bc8](https://github.com/splunk/SC4SNMP-UI/commit/33b1bc82d9dbbb10aa64a4eb14dfcc2a1f6b680d)) * error message for security engine informing that all the letters must be either upper or lower case ([2503a51](https://github.com/splunk/SC4SNMP-UI/commit/2503a51aba3dba66872b9fdf79b8ab0a1caba9e7)) * finished groups UI ([5bfb791](https://github.com/splunk/SC4SNMP-UI/commit/5bfb791bbe89e27dd2fb769a6bcf0509c2b98337)) * first version of edits ([f49a759](https://github.com/splunk/SC4SNMP-UI/commit/f49a759dee5cd1236ab0392eb01a546ff9052b99)) * first version of working group form without validation, refactored delete modal ([f291d15](https://github.com/splunk/SC4SNMP-UI/commit/f291d1518f1802859523b056eb7d23a4495ccb46)) * first version of working group form without validation, refactored delete modal - backend ([5d8e1ee](https://github.com/splunk/SC4SNMP-UI/commit/5d8e1ee7c4a6cc9e7cb6f88cde790c30c46d85ba)) * fix invalid escape sequence \s in .releaserc ([be8a89f](https://github.com/splunk/SC4SNMP-UI/commit/be8a89faf4b5ba2a46e8a6eec07920a848bb1461)) * fix matches in .releaserc ([4710923](https://github.com/splunk/SC4SNMP-UI/commit/4710923544eedc10fc8bd5b3a369a73495495f6b)) * fixed bug that made edit instead of new profile after previously editing profile ([1214370](https://github.com/splunk/SC4SNMP-UI/commit/12143705b6f4007aa18f20cafc4236b83c0c1428)) * fixed conditions editing ([3281c7b](https://github.com/splunk/SC4SNMP-UI/commit/3281c7bf8ff434d1fedc83f26d454d61b33594f6)) * fixed error with adding and deleting conditions' patterns while editing ([45870c9](https://github.com/splunk/SC4SNMP-UI/commit/45870c999003c6a4559d60b6c64a38b9bd26a53f)) * fixed error with adding varBinds while editing ([02efd96](https://github.com/splunk/SC4SNMP-UI/commit/02efd96f119f24120016c114b7b80efbebe597ac)) * fixed error with deleting last device from the current page ([25105d9](https://github.com/splunk/SC4SNMP-UI/commit/25105d917ace0f482c217908fda7285d801eeaa2)) * fixed error with deleting varBinds ([77609a4](https://github.com/splunk/SC4SNMP-UI/commit/77609a46542a493bc22799d6ad39109e8d6abe95)) * fixed issue with adding device instead of updating if nothing is changed in updating form ([cbbd2b1](https://github.com/splunk/SC4SNMP-UI/commit/cbbd2b1403f0182a8863fcdbf17b693b88316736)) * fixed varBinds validation ([eec377f](https://github.com/splunk/SC4SNMP-UI/commit/eec377fcc5ea2590b7c81d56c6dfb1efb2420033)) * fixed varBinds validation
([f93fdf6](https://github.com/splunk/SC4SNMP-UI/commit/f93fdf6cb85f4dd8f80aab040415f4feb7201de3)) * fixed varBinds validation ([952eeb8](https://github.com/splunk/SC4SNMP-UI/commit/952eeb851d971956435998a8a1bd4f759dc950b9)) * fixed varBinds validation ([7b0c888](https://github.com/splunk/SC4SNMP-UI/commit/7b0c8881c962b39f4b033c825e80827e1412c63a)) * fixes of varBinds errors ([7eb0d62](https://github.com/splunk/SC4SNMP-UI/commit/7eb0d6274a016df5b8198ff3283d73690c552282)) * fixing bug in pagination ([be8981c](https://github.com/splunk/SC4SNMP-UI/commit/be8981c43d04d509db020c80fd9719e4478869e8)) * flask tests ([60eb305](https://github.com/splunk/SC4SNMP-UI/commit/60eb3050c64e798bdca21d0a6874d2544a3a17f9)) * get host from environment variable ([83605c1](https://github.com/splunk/SC4SNMP-UI/commit/83605c11b1afbcce78a3bc9a840cb55ce7ea40d6)) * groups integration with sc4snmp database ([99c072d](https://github.com/splunk/SC4SNMP-UI/commit/99c072dbd481526d6d4e465ff4ddc689413328c2)) * if profile name is updated or the whole profile is deleted, update corresponding record in the inventory ([f9fd0a8](https://github.com/splunk/SC4SNMP-UI/commit/f9fd0a815a790d154050c0a26e9880cd95de639f)) * improved IPv4 regex ([49c046f](https://github.com/splunk/SC4SNMP-UI/commit/49c046f7a7675df90114abb1e63575758e7a434e)) * improved reload after deleting, adding or updating device in group ([49fc9b6](https://github.com/splunk/SC4SNMP-UI/commit/49fc9b669c7314191507153d7f31c4829ff383ff)) * InventoryContext, auto reload Inventory after adding new device ([14acbf8](https://github.com/splunk/SC4SNMP-UI/commit/14acbf876c80c5b7368608d2ebbfbca33636a945)) * loading inventory from mongo ([7df8cfb](https://github.com/splunk/SC4SNMP-UI/commit/7df8cfb74e404d1ec0fee2fb8ebc2436fcfc3640)) * logic for inventory validation ([60de7a3](https://github.com/splunk/SC4SNMP-UI/commit/60de7a3034160f523ede7347ce1cca6ad3d77580)) * logic for profiles validation ([373747f](https://github.com/splunk/SC4SNMP-UI/commit/373747f9e7e6ff904842bc5995b985e52f0ce1a1)) * more options in profile condition selection ([9f4ba5b](https://github.com/splunk/SC4SNMP-UI/commit/9f4ba5b287d0ae2eb862c9720c65679f63b181f5)) * more post endpoint tests ([914b241](https://github.com/splunk/SC4SNMP-UI/commit/914b241c85d562fd22a5a1b254efcd1f2ce5b8ef)) * move error styles to the separate file ([40b5917](https://github.com/splunk/SC4SNMP-UI/commit/40b59179e607d21472e05f24249c6dfad02a11da)) * new menu, unfinished header ([f1b249b](https://github.com/splunk/SC4SNMP-UI/commit/f1b249be5b7bacc75d4ceb46204a5063ec18a9ab)) * pagination - backend ([e27e327](https://github.com/splunk/SC4SNMP-UI/commit/e27e3279a18adda7535585c014680eaaa9c732e2)) * pagination of group devices ([18ad840](https://github.com/splunk/SC4SNMP-UI/commit/18ad840acbd2d0205692cec07d19da980af79d64)) * pagination of groups and inventory - backend ([5246cd8](https://github.com/splunk/SC4SNMP-UI/commit/5246cd893ddeae37380a5962ef89c40a29b83c50)) * pagination of inventory ([951208d](https://github.com/splunk/SC4SNMP-UI/commit/951208dff30913bde8a9ecbab131aef2022f9059)) * pagination with selection of devices per page ([010c56c](https://github.com/splunk/SC4SNMP-UI/commit/010c56cc94601db4c0de6dd06483a8c6403259b5)) * Patterns validation.
Validation of new empty fields does not always work ([929e6e7](https://github.com/splunk/SC4SNMP-UI/commit/929e6e78690437d5dd9c82f76236165f6a684ab5)) * polishing inventory ([8284054](https://github.com/splunk/SC4SNMP-UI/commit/82840547d1fcb2e5e2cab0acaca979614f186804)) * position add and apply buttons, change font size in the menu bar ([bebba11](https://github.com/splunk/SC4SNMP-UI/commit/bebba11f3567b8f0af9ef9c01185eae24b6df783)) * refactor ([aca5ded](https://github.com/splunk/SC4SNMP-UI/commit/aca5ded0508fc6e631c7e4150b99b232e821a204)) * refactor AddProfileModal.jsx ([35f632b](https://github.com/splunk/SC4SNMP-UI/commit/35f632bbec2bb53775ac059076097a9add3e7ef7)) * refactor code and add comments ([0688c29](https://github.com/splunk/SC4SNMP-UI/commit/0688c2928409cbb083790276cb926e0511965055)) * refactor routes in flask, refactor InventoryList.jsx, change delete button in delete modal to primary, after deleting anything always display confirmation, change label on the button in the inventory, add a unit to the polling frequency ([0646361](https://github.com/splunk/SC4SNMP-UI/commit/0646361019a76f606cacdf1b5a382e4c422a11f1)) * Refactored inventory.py and basic edit of inventory ([0680781](https://github.com/splunk/SC4SNMP-UI/commit/0680781b1649f587c53f36920329cc652cb1fe55)) * refactoring ([f6ae9c9](https://github.com/splunk/SC4SNMP-UI/commit/f6ae9c97bb2adbf8f4ae2aa33be8ea3700f83f5b)) * refactoring backend ([e17ad77](https://github.com/splunk/SC4SNMP-UI/commit/e17ad7702e03fa5b861267f40d5a86a6d84cad1d)) * refresh updates automatically after initial app start ([ff61c7f](https://github.com/splunk/SC4SNMP-UI/commit/ff61c7f3cab462c62142a66784fb491cf64790df)) * remove extra quotation mark in .releaserc ([af58564](https://github.com/splunk/SC4SNMP-UI/commit/af585647d5acb282f4150b861853bd0ce98acd5b)) * remove print ([abb1348](https://github.com/splunk/SC4SNMP-UI/commit/abb13484d7b2666da2d3b0413e3795410da6c687)) * remove spaces in ValidationStyles.jsx ([4e9b506](https://github.com/splunk/SC4SNMP-UI/commit/4e9b5065e84fa01bebf6b68ea1755743a750df29)) * remove temporary tooltip ([67b69ea](https://github.com/splunk/SC4SNMP-UI/commit/67b69eaa9e7e0114c80b754e329b5f58b82116e5)) * removed some logs ([e96b124](https://github.com/splunk/SC4SNMP-UI/commit/e96b124cfe90923a6e1082ef212f9f3f9dccfd7f)) * Renamed IP address in form to IP address/Group ([31c8cc5](https://github.com/splunk/SC4SNMP-UI/commit/31c8cc5ec141a03183f3dea74d6be11308a54ed1)) * renamed key in devices record from group_id to groupId ([23fd776](https://github.com/splunk/SC4SNMP-UI/commit/23fd776be8106027fb5f1ac772bab26fbbb29878)) * restructure directory ([15ec5a0](https://github.com/splunk/SC4SNMP-UI/commit/15ec5a0df4d3e1be44fee960a00bb282de51ffe5)) * run frontend on localhost ([da49efa](https://github.com/splunk/SC4SNMP-UI/commit/da49efa72a28c9d0739f355d2da762e56822a0b6)) * same database for profiles ([608c4b1](https://github.com/splunk/SC4SNMP-UI/commit/608c4b1f35299ddc053a5b4915e891df0edbca78)) * send default values from ProfilesModal to the database ([4786bba](https://github.com/splunk/SC4SNMP-UI/commit/4786bba2a1607f5839605f9214a36136c82e047d)) * set default values for port number and snmp version to empty strings in device configuration in groups ([06fe3dc](https://github.com/splunk/SC4SNMP-UI/commit/06fe3dcc21d326e2dc8be115420f5f1cb8ba8d7e)) * set profiles as default page ([e26b508](https://github.com/splunk/SC4SNMP-UI/commit/e26b508b4ea8baa215ca5f54d760613045e1529f)) * state is changed in componentDidUpdate and componentDidMount instead of in
render() ([21fa4e1](https://github.com/splunk/SC4SNMP-UI/commit/21fa4e159bcd9f2329ecae58e88a63bc3f6aaa52)) * styling menu bar ([676783c](https://github.com/splunk/SC4SNMP-UI/commit/676783c852d6d1bbc047883550413bc9d07fad80)) * testing menu tab ([eb09a26](https://github.com/splunk/SC4SNMP-UI/commit/eb09a26e86544cff602bc3e6bbc2e4e7b78ea473)) * testing pagination of inventory ([286b78f](https://github.com/splunk/SC4SNMP-UI/commit/286b78f1a59fc68234e2df3a80f10c697147e7b4)) * tooltips ([6a16dc8](https://github.com/splunk/SC4SNMP-UI/commit/6a16dc89b9d6bfb719ff738b0b4d82fb19a55bcd)) * unit tests of conversions ([aa094b2](https://github.com/splunk/SC4SNMP-UI/commit/aa094b2f60c7b096242b4bba018400ccf3eb6727)) * update .gitignore ([b486e9e](https://github.com/splunk/SC4SNMP-UI/commit/b486e9ec8e9082afa203b24fabcd9fb6ae7981ba)) * update .releaserc ([b07613c](https://github.com/splunk/SC4SNMP-UI/commit/b07613cf6e1573cef69df1e8a23266eb6a4c7176)) * update .releaserc ([6159070](https://github.com/splunk/SC4SNMP-UI/commit/61590707ba55bad184c11bd5d7e087e163f38d9f)) * update dependencies ([c01315e](https://github.com/splunk/SC4SNMP-UI/commit/c01315e07f36e9ba185b652a7f35ebb0017c809a)) * update device from group id reference ([7642e16](https://github.com/splunk/SC4SNMP-UI/commit/7642e1638f284d8ad60974c8e4768162a663c73d)) * update dockerfile for backend ([890956f](https://github.com/splunk/SC4SNMP-UI/commit/890956f9cd61a357c99af1ee328941459cb7af47)) * update form in profiles ([04fff13](https://github.com/splunk/SC4SNMP-UI/commit/04fff13042230690156431df6097451ba2a44700)) * update lerna.json and package.json, delete package-lock.json ([6a10457](https://github.com/splunk/SC4SNMP-UI/commit/6a10457b7b69a1280ee9b1135158a185b5a63313)) * update the way of passing flask IP to the frontend app ([d06d77d](https://github.com/splunk/SC4SNMP-UI/commit/d06d77d3c20243fcd617515e4953e2d522982e50)) * updated profiles view (opening collapsible not finished) ([f34525a](https://github.com/splunk/SC4SNMP-UI/commit/f34525aab83191862bd7da7c9ce32b303ae47e3a)) * updated urls ([56b8169](https://github.com/splunk/SC4SNMP-UI/commit/56b81695f1da4e5f9ab0bdb1f8bb9ec6f9a590f0)) * use another collection for ui inventory, send error messages to user if they want to add inventory record that already exists, send error message when user wants to add nonexistent group to the inventory and send message about deleting record from inventory when user deletes group ([bc5006a](https://github.com/splunk/SC4SNMP-UI/commit/bc5006ab3e908c3e6887f74ab0b075f927825ae2)) * use separate mongo collection for profiles and groups from ui ([7b405e5](https://github.com/splunk/SC4SNMP-UI/commit/7b405e5aafa2adaaba32959edc710dfe83813937)) * validation for adding group with existing name and existing device in the group ([e741db4](https://github.com/splunk/SC4SNMP-UI/commit/e741db41c5710d20ac197aca2e6be89d5278aa1b)) * validation messages for Profile Name and Frequency ([e64e630](https://github.com/splunk/SC4SNMP-UI/commit/e64e6301e8bcb417db05437f10e5ff0b15f930fe)) * Validation of groups ([8ea0bfe](https://github.com/splunk/SC4SNMP-UI/commit/8ea0bfe3cc949b0836b5103da23d69ffe73c11f8)) * validation of inventory form ([7a82789](https://github.com/splunk/SC4SNMP-UI/commit/7a82789d7de8bc3130ec082dd5e988a5475360d7)) * wrapping children with ProfileContext in Manager.jsx ([6030b68](https://github.com/splunk/SC4SNMP-UI/commit/6030b680c00f28ff0163654e348e6c41f4814973)) * working on groups
([a61ab32](https://github.com/splunk/SC4SNMP-UI/commit/a61ab32faa0482759d98e4ff5c593cecfb3863bb)) * working validation ([e4557bf](https://github.com/splunk/SC4SNMP-UI/commit/e4557bf74f64d1c62df7d644e879335450dffe43)) * fix: refactor routes in flask, refactor InventoryList.jsx, change delete button in delete modal to primary, after deleting anything always display confirmation, change label on the button in the inventory, add a unit to the polling frequency fix: display config from the inventory for the devices in groups fix: unfinished validation of new hosts and group fix: validation of new devices and groups fix: comment out obsolete tests - they will be updated while creating the test suite * fix: Add apply changes fix: add comments to handle_changes.py and kubernetes_job.py fix: in handle_changes.py: added CheckIfPreviousJobFailed handler, added max retries for job creation, added logging and mongo exception handling in celery task fix: update comment in CheckJobHandler * fix: change gunicorn log-level to info * fix: enable compound indices in profiles and configuration of hosts in the inventory using string address fix: enable compound indices and configuration of string-based hosts fix: refactor condition in profiles form fix: in ProfileConversion._backend2ui_map for empty patterns field return empty list instead of None fix: refactor varBinds form * fix: add conditional profiles fix: add conversion of conditional profiles fix: validate if any varBind, pattern or conditions were added fix: remove field and patterns columns from profiles screen, rename fields in profile form fix: rename UI fields in profiles change conversion to int and float * fix: update semantic-release-action version * fix: update __string_value_to_numeric method * fix: update node version in workflows * fix: downgrade semantic-release-action * fix: downgrade node in workflows * fix: refactor and add new tests for flask backend.
fix: refactoring inventory post endpoints tests fix: refactor tests and conversions after rebasing fix: refactor helpers.py, write test for adding groups in the inventory fix: update test_post_inventory and test_conversions, refactor conversions.py fix: in Conditional.jsx rename operation 'equal' to 'equals', in Header.jsx while opening new profile modal set default condition to 'standard' fix: add tests for apply changes functionality * fix: bump semantic_version to 20 and cycjimmy/semantic-release-action to 3.4.2 * fix: bump node-version for semantic release and change extra plugin @google/semantic-release-replace-plugin to jpoehnelt/semantic-release-replace-plugin * fix: change jpoehnelt/semantic-release-replace-plugin to semantic-release-replace-plugin * fix: use semantic-release-replace-plugin@v1.2.0 * fix: bump semantic-release-replace-plugin * fix: use @google/semantic-release-replace-plugin@1.2.5 plugin * fix: use @google/semantic-release-replace-plugin@1.2.0 plugin * fix: use @google/semantic-release-replace-plugin@v1.2.5 plugin * fix: in .releaserc use semantic-release-replace-plugin and in ci-release.yaml semantic-release-replace-plugin@v1.2.4 * fix: in .releaserc use semantic-release-replace-plugin and in ci-release.yaml semantic-release-replace-plugin@1.2.4 * fix: use semantic-release-replace-plugin@1.2.0 * chore(release): 1.0.0-beta.2 # [1.0.0-beta.2](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.1...v1.0.0-beta.2) (2023-07-11) ### Bug Fixes * Add apply changes ([f3e450a](https://github.com/splunk/SC4SNMP-UI/commit/f3e450af76d036e7fe26e91d9885c68b4aedb78c)) * add conditional profiles ([1e146c9](https://github.com/splunk/SC4SNMP-UI/commit/1e146c95332d6a9a5a211e8f92aacc0a494f86d8)) * bump node-version for semantic release and change extra plugin @google/semantic-release-replace-plugin to jpoehnelt/semantic-release-replace-plugin ([1fb717b](https://github.com/splunk/SC4SNMP-UI/commit/1fb717b22b6e01028bd442b23691869a36e2b3de)) * bump semantic_version to 20 and cycjimmy/semantic-release-action to 3.4.2 ([f58270c](https://github.com/splunk/SC4SNMP-UI/commit/f58270c2ddaec78105fddf14fd8be58b03019ee3)) * bump semantic-release-replace-plugin ([39992eb](https://github.com/splunk/SC4SNMP-UI/commit/39992eb51ef4bab12718cac6123a2b7aa6d9e211)) * change gunicorn log-level to info ([4420748](https://github.com/splunk/SC4SNMP-UI/commit/4420748bbeaec3804b263c0b2edcb324afa84e6f)) * change jpoehnelt/semantic-release-replace-plugin to semantic-release-replace-plugin ([5d1dc3f](https://github.com/splunk/SC4SNMP-UI/commit/5d1dc3f98c2c23e5b7497683b0cd1a9d655310f9)) * downgrade node in workflows ([d2a1a99](https://github.com/splunk/SC4SNMP-UI/commit/d2a1a9982cba6218398e93fa93244cd2e2688c04)) * downgrade semantic-release-action ([399726a](https://github.com/splunk/SC4SNMP-UI/commit/399726aa9512e2a8506e49bbf4dc719e0dd68a40)) * enable compound indices in profiles and configuration of hosts in the inventory using string address ([30f6d90](https://github.com/splunk/SC4SNMP-UI/commit/30f6d9053f51acf36c6bbbe990e0b55fb581df86)) * in .releaserc use semantic-release-replace-plugin and in ci-release.yaml semantic-release-replace-plugin@1.2.4 ([94bdc57](https://github.com/splunk/SC4SNMP-UI/commit/94bdc576f9712d74cfe5fe01fc2db8fa029b2219)) * in .releaserc use semantic-release-replace-plugin and in ci-release.yaml semantic-release-replace-plugin@v1.2.4 ([a3122b5](https://github.com/splunk/SC4SNMP-UI/commit/a3122b55078c2b8fcc2a09f63ecfb3a108166073)) * refactor and add new tests for flask backend. 
([a9dc102](https://github.com/splunk/SC4SNMP-UI/commit/a9dc1021d37a3ac12e1e445788d9e3a5e8ee2867)) * refactor routes in flask, refactor InventoryList.jsx, change delete button in delete modal to primary, after deleting anything always display confirmation, change label on the button in the inventory, add a unit to the polling frequency ([13e48d0](https://github.com/splunk/SC4SNMP-UI/commit/13e48d06da3dfc6df65b169ee4b43691549364ff)) * update __string_value_to_numeric method ([3f7bbdf](https://github.com/splunk/SC4SNMP-UI/commit/3f7bbdfd4afb2f1880712b13b17caa33eeea289b)) * update node version in workflows ([052582c](https://github.com/splunk/SC4SNMP-UI/commit/052582cd13032811fb62ab7e3833ae4e5c9ebeca)) * update semantic-release-action version ([6604c8f](https://github.com/splunk/SC4SNMP-UI/commit/6604c8fef43c9ad9dc1b6eb01251222824988c67)) * use @google/semantic-release-replace-plugin@1.2.0 plugin ([addccef](https://github.com/splunk/SC4SNMP-UI/commit/addccef075c7056ba84accebb89ceb08524ce212)) * use @google/semantic-release-replace-plugin@1.2.5 plugin ([434da60](https://github.com/splunk/SC4SNMP-UI/commit/434da60d6d6784072700214880db27aa559fe47f)) * use @google/semantic-release-replace-plugin@v1.2.5 plugin ([7fbdedb](https://github.com/splunk/SC4SNMP-UI/commit/7fbdedbaabae6a20204fa7717942d98f77290d99)) * use semantic-release-replace-plugin@1.2.0 ([9949783](https://github.com/splunk/SC4SNMP-UI/commit/99497832a221a3733c1f296af14fe3318f10c1bc)) * use semantic-release-replace-plugin@v1.2.0 ([5d45a28](https://github.com/splunk/SC4SNMP-UI/commit/5d45a284d22dc720c60327a5c39422b17c8e5d79)) * fix: unit tests of inventory form, refactor contexts * fix: add unit tests for groups forms, add tests for varbinds, smart profile patterns and profile name in profiles form * fix: finish AddProfileModal tests, add test for snmpv3 in the inventory * fix: delete Manager.unit.jsx * chore(release): 1.0.0-beta.3 # [1.0.0-beta.3](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.2...v1.0.0-beta.3) (2023-07-18) ### Bug Fixes * add unit tests for groups forms, add tests for varbinds, smart profile patterns and profile name in profiles form ([0a4758c](https://github.com/splunk/SC4SNMP-UI/commit/0a4758c698a09b23407dec9fe03c110dc6db1a62)) * delete Manager.unit.jsx ([cf3088d](https://github.com/splunk/SC4SNMP-UI/commit/cf3088d0a29daf74c8f0697c6f4ae28e80fbae85)) * finish AddProfileModal tests, add test for snmpv3 in the inventory ([2e6dd22](https://github.com/splunk/SC4SNMP-UI/commit/2e6dd220af8aa8a930a1023712873052882e3c20)) * unit tests of inventory form, refactor contexts ([edc60a3](https://github.com/splunk/SC4SNMP-UI/commit/edc60a37a1a1dd5c17b900097e0072e078c96d98)) * fix: add warnings when deleting groups or profiles which are configured in the inventory. No frequency configuration for walk profiles. Default number of presented items changed to 20. fix: change message while deleting profiles.
Change what is displayed in Frequency column for walk profiles fix: update tests fix: remove unnecessary print fix: improve logging in handle_changes.py, update config_collection in job instead of in CheckIfPreviousJobFailed fix: fix error in celery job fix: update request message in apply_changes fix: typo in return message fix: fix problem with editing walk profile fix: refactor files and write config from mongo to yaml files on host machine fix: change message in error thrown by SaveConfigToFileHandler * fix: save mongo configuration to values.yaml fix: change directory for temp files fix: add test for edited values.yaml file fix: typos and description of yaml_escape_list function * chore(release): 1.0.0-beta.4 # [1.0.0-beta.4](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.3...v1.0.0-beta.4) (2023-07-27) ### Bug Fixes * add warnings when deleting groups or profiles which are configured in the inventory. No frequency configuration for walk profiles. Default number of presented items changed to 20. ([8a6cb01](https://github.com/splunk/SC4SNMP-UI/commit/8a6cb0134b08256e4bd81b972e5941ed894ad60b)) * save mongo configuration to values.yaml ([fb615f6](https://github.com/splunk/SC4SNMP-UI/commit/fb615f68304f8326036eb08cf92a0b993ffb5e28)) * fix: add selectors for tests fix: update frontend unit tests fix: rebase on develop branch fix: add more ui test selectors, delete unused ButtonsModal component * chore(release): 1.0.0-beta.5 # [1.0.0-beta.5](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.4...v1.0.0-beta.5) (2023-07-27) ### Bug Fixes * add selectors for tests ([f025b9c](https://github.com/splunk/SC4SNMP-UI/commit/f025b9c02bfb69bbc169d76058257df9a3f9fc3d)) * fix: stay on the same page after refreshing the browser * chore(release): 1.0.0-beta.6 # [1.0.0-beta.6](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.5...v1.0.0-beta.6) (2023-07-27) ### Bug Fixes * stay on the same page after refreshing the browser ([4bf090f](https://github.com/splunk/SC4SNMP-UI/commit/4bf090f5a593c824908638852cde44a5fd236f7b)) * fix: validation of the same varBinds, conditions and patterns in one profile * chore(release): 1.0.0-beta.7 # [1.0.0-beta.7](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.6...v1.0.0-beta.7) (2023-07-28) ### Bug Fixes * validation of the same varBinds, conditions and patterns in one profile ([d9bed2d](https://github.com/splunk/SC4SNMP-UI/commit/d9bed2dc278701431a9af5801ab745a008b4b99e)) * fix: add unit tests to pipeline fix: add VALUES_DIRECTORY env variable to test-unit-backend job fix: env variable instead of .env file fix: add unit tests for frontend in the pipeline fix: fix test-unit-backend and test failing tests of frontend fix: test backend failing tests fix: working tests fix: update checkout and setup-python actions, delete actions/upload-artifact@v3 action * chore(release): 1.0.0-beta.8 # [1.0.0-beta.8](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.7...v1.0.0-beta.8) (2023-07-28) ### Bug Fixes * add unit tests to pipeline ([4ed7634](https://github.com/splunk/SC4SNMP-UI/commit/4ed76347c5710d5806e2202390a859805cf7afe9)) * fix: add groups and single hosts in separate fields.
Clear notification when some record wasn't edited or added (#38) fix: fix broken test fix: remove unused imports fix: validate situation when user tries to add host which has the same name as a group configured in groups or reverse situation fix: allow adding host names starting with a digit fix: update get_inventory_type function and tests * chore(release): 1.0.0-beta.9 # [1.0.0-beta.9](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.8...v1.0.0-beta.9) (2023-08-02) ### Bug Fixes * add groups and single hosts in separate fields. Clear notification when some record wasn't edited or added ([#38](https://github.com/splunk/SC4SNMP-UI/issues/38)) ([86b4cff](https://github.com/splunk/SC4SNMP-UI/commit/86b4cfff9aa1b5f3c8e3e65ae038d2b625315d0e)) * fix: update test ids (#39) * chore(release): 1.0.0-beta.10 # [1.0.0-beta.10](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.9...v1.0.0-beta.10) (2023-08-07) ### Bug Fixes * update test ids ([#39](https://github.com/splunk/SC4SNMP-UI/issues/39)) ([68d32a5](https://github.com/splunk/SC4SNMP-UI/commit/68d32a5a835a643795a9c24ae59d217352e370a5)) * fix: upgrade varbinds validation (#40) * fix: group name can't start with a number (#41) * chore(release): 1.0.0-beta.11 # [1.0.0-beta.11](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.10...v1.0.0-beta.11) (2023-08-10) ### Bug Fixes * group name can't start with a number ([#41](https://github.com/splunk/SC4SNMP-UI/issues/41)) ([2fc2ee6](https://github.com/splunk/SC4SNMP-UI/commit/2fc2ee6e26f10d2d125358ffcd63cf3169658dd0)) * upgrade varbinds validation ([#40](https://github.com/splunk/SC4SNMP-UI/issues/40)) ([9edebff](https://github.com/splunk/SC4SNMP-UI/commit/9edebfff91a9d1103790573a37265a573b17b7b9)) * Fix/remove indices from test ids (#42) * fix: remove indices from test ids * fix: add test ids to tables and individual cells in profiles, groups and inventory * fix: trigger release (#43) * chore(release): 1.0.0-beta.12 # [1.0.0-beta.12](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.11...v1.0.0-beta.12) (2023-08-14) ### Bug Fixes * trigger release ([#43](https://github.com/splunk/SC4SNMP-UI/issues/43)) ([cf1967a](https://github.com/splunk/SC4SNMP-UI/commit/cf1967a938f657a7d30e2e5d0140a23738a6fb89)) * fix: fix bug with deleted inventory records reappearing after deleting profiles (#44) * chore(release): 1.0.0-beta.13 # [1.0.0-beta.13](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.12...v1.0.0-beta.13) (2023-09-11) ### Bug Fixes * fix bug with deleted inventory records reappearing after deleting profiles ([#44](https://github.com/splunk/SC4SNMP-UI/issues/44)) ([599f475](https://github.com/splunk/SC4SNMP-UI/commit/599f475789c2e91066e17cf9a02a788aeeffa903)) * fix: fix typos in groups (#45) * chore(release): 1.0.0-beta.14 # [1.0.0-beta.14](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.13...v1.0.0-beta.14) (2023-09-11) ### Bug Fixes * fix typos in groups ([#45](https://github.com/splunk/SC4SNMP-UI/issues/45)) ([bad506e](https://github.com/splunk/SC4SNMP-UI/commit/bad506ec88d21b72e8b64bd0cab3e82d107ca939)) * fix: fix apply changes api message (#46) * chore(release): 1.0.0-beta.15 # [1.0.0-beta.15](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.14...v1.0.0-beta.15) (2023-09-12) ### Bug Fixes * fix apply changes api message ([#46](https://github.com/splunk/SC4SNMP-UI/issues/46)) ([8d72e17](https://github.com/splunk/SC4SNMP-UI/commit/8d72e17ea23a154981618ed954b45b52f294c483)) * fix: add max walk interval validation, fix clicking on group names, add max
group name length validation (#47) * chore(release): 1.0.0-beta.16 # [1.0.0-beta.16](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.15...v1.0.0-beta.16) (2023-09-12) ### Bug Fixes * add max walk interval validation, fix clicking on group names, add max group name length validation ([#47](https://github.com/splunk/SC4SNMP-UI/issues/47)) ([b9ad7d7](https://github.com/splunk/SC4SNMP-UI/commit/b9ad7d7e6f8fd555c72fa5f9b3da739fb45e58ae)) * fix: add regex and negation options for conditional profiles (#48) fix: add unit test for conditional profile negation fix: update backend unit test * chore(release): 1.0.0-beta.17 # [1.0.0-beta.17](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.16...v1.0.0-beta.17) (2023-09-15) ### Bug Fixes * add regex and negation options for conditional profiles ([#48](https://github.com/splunk/SC4SNMP-UI/issues/48)) ([a34ab61](https://github.com/splunk/SC4SNMP-UI/commit/a34ab6127662d367fd8f3f73474394a4209884cd)) * fix: fix typos in ui (#49) * chore(release): 1.0.0-beta.18 # [1.0.0-beta.18](https://github.com/splunk/SC4SNMP-UI/compare/v1.0.0-beta.17...v1.0.0-beta.18) (2023-10-04) ### Bug Fixes * fix typos in ui ([#49](https://github.com/splunk/SC4SNMP-UI/issues/49)) ([ccb482d](https://github.com/splunk/SC4SNMP-UI/commit/ccb482df6c8c499f0d1cf69a0b01f9fce83d3e00)) --------- Co-authored-by: semantic-release-bot --- .github/workflows/ci-build-backend.yaml | 80 -- .../{ci-build-frontend.yaml => ci-build.yaml} | 63 +- .github/workflows/ci-main.yaml | 80 ++ .github/workflows/ci-release.yaml | 153 ++ .releaserc | 86 ++ backend/Dockerfile | 11 +- backend/SC4SNMP_UI_backend/__init__.py | 52 +- .../__init__.py | 0 .../apply_changes/apply_changes.py | 69 + .../apply_changes/config_to_yaml_utils.py | 232 +++ .../apply_changes/handling_chain.py | 169 +++ .../apply_changes/kubernetes_job.py | 189 +++ .../apply_changes/routes.py | 22 + .../SC4SNMP_UI_backend/apply_changes/tasks.py | 55 + .../common/backend_ui_conversions.py | 308 ++++ .../SC4SNMP_UI_backend/common/conversions.py | 226 --- .../common/inventory_utils.py | 266 ++++ backend/SC4SNMP_UI_backend/groups/__init__.py | 0 backend/SC4SNMP_UI_backend/groups/routes.py | 175 +++ .../SC4SNMP_UI_backend/inventory/__init__.py | 0 .../SC4SNMP_UI_backend/inventory/routes.py | 93 ++ .../SC4SNMP_UI_backend/profiles/__init__.py | 0 backend/SC4SNMP_UI_backend/profiles/routes.py | 130 ++ .../SC4SNMP_UI_backend/ui_handling/helpers.py | 97 -- .../SC4SNMP_UI_backend/ui_handling/routes.py | 309 ---- backend/app.py | 5 +- backend/celery_start.sh | 5 + backend/flask_start.sh | 4 + backend/package-lock.json | 153 -- backend/package.json | 5 - backend/requirements.txt | 8 +- backend/tests/common/__init__.py | 0 ...ions.py => test_backend_ui_conversions.py} | 105 +- backend/tests/ui_handling/__init__.py | 0 .../tests/ui_handling/create_job_object.py | 173 +++ .../ui_handling/get_endpoints/__init__.py | 0 .../{ => get_endpoints}/test_get_endpoints.py | 46 +- .../ui_handling/post_endpoints/__init__.py | 0 .../post_endpoints/test_post_apply_changes.py | 299 ++++ .../post_endpoints/test_post_groups.py | 579 ++++++++ .../post_endpoints/test_post_inventory.py | 1263 +++++++++++++++++ .../post_endpoints/test_post_profiles.py | 274 ++++ .../tests/ui_handling/test_post_endpoints.py | 998 ------------- .../reference_files/poller_inventory.yaml | 4 + .../reference_files/scheduler_groups.yaml | 12 + .../reference_files/scheduler_profiles.yaml | 51 + .../reference_files/values.yaml | 161 +++ .../yamls_for_tests/values_test/.gitignore | 1 + 
.../values_test/values-before-edit.yaml | 139 ++ .../yamls_for_tests/values_test/values.yaml | 139 ++ frontend/Dockerfile | 1 + frontend/lerna.json | 2 +- .../splunk-app/appserver/templates/demo.html | 2 +- .../manager/demo/standalone/index.html | 2 +- frontend/packages/manager/jest.config.js | 6 +- frontend/packages/manager/package.json | 13 +- frontend/packages/manager/src/Manager.jsx | 8 +- .../manager/src/components/ButtonsModal.jsx | 34 - .../manager/src/components/DeleteModal.jsx | 12 +- .../manager/src/components/ErrorsModal.jsx | 23 +- .../src/components/groups/AddDeviceModal.jsx | 67 +- .../src/components/groups/AddGroupModal.jsx | 26 +- .../src/components/groups/GroupsList.jsx | 90 +- .../inventory/AddInventoryModal.jsx | 154 +- .../components/inventory/InventoryList.jsx | 280 ++-- .../src/components/menu_header/Header.jsx | 37 +- .../src/components/menu_header/Menu.jsx | 30 +- .../components/profiles/AddProfileModal.jsx | 105 +- .../src/components/profiles/Condition.jsx | 75 + .../src/components/profiles/Conditional.jsx | 197 +++ .../src/components/profiles/ConditionalIn.jsx | 116 ++ .../src/components/profiles/Conditions.jsx | 75 - .../src/components/profiles/FieldPatterns.jsx | 114 ++ .../components/profiles/PatternsCreator.jsx | 169 --- .../src/components/profiles/ProfilesList.jsx | 96 +- .../src/components/profiles/VarBinds.jsx | 137 ++ .../components/profiles/VarbindsCreator.jsx | 200 --- .../validation/ValidateInventoryAndGroup.jsx | 62 +- .../validation/ValidateProfiles.jsx | 240 +++- .../components/validation/ValidationGroup.jsx | 12 + .../manager/src/pages/InventoryPage.jsx | 8 +- .../manager/src/store/buttons-contx.jsx | 10 +- .../manager/src/store/errors-modal-contxt.jsx | 14 +- .../manager/src/store/group-contxt.jsx | 19 +- .../manager/src/store/inventory-contxt.jsx | 69 +- .../inventory-devices-validation-contxt.jsx | 3 +- .../manager/src/store/menu-header-contxt.jsx | 3 +- .../manager/src/store/profile-contxt.jsx | 23 +- .../src/store/profiles-validation-contxt.jsx | 100 +- .../manager/src/styles/ValidationStyles.jsx | 3 +- .../manager/src/tests/AddDeviceModal.test.jsx | 92 ++ .../manager/src/tests/AddGroupModal.test.jsx | 61 + .../src/tests/AddInventoryModal.test.jsx | 170 +++ .../src/tests/AddProfileModal.test.jsx | 493 +++++++ .../manager/src/tests/Manager.unit.jsx | 33 - .../custom_testing_lib/custom-queries.jsx | 5 + .../custom_testing_lib/custom-testing-lib.jsx | 19 + .../MockErrorsContextProvider.jsx | 19 + .../MockGroupContextProvider.jsx | 111 ++ .../MockInventoryContextProvider.jsx | 89 ++ ...MockInventoryValidationContextProvider.jsx | 100 ++ .../MockProfileContextProvider.jsx | 65 + .../MockProfileValidationContextProvider.jsx | 155 ++ frontend/yarn.lock | 743 +++++++++- 104 files changed, 9032 insertions(+), 3049 deletions(-) delete mode 100644 .github/workflows/ci-build-backend.yaml rename .github/workflows/{ci-build-frontend.yaml => ci-build.yaml} (57%) create mode 100644 .github/workflows/ci-main.yaml create mode 100644 .github/workflows/ci-release.yaml create mode 100644 .releaserc rename backend/SC4SNMP_UI_backend/{ui_handling => apply_changes}/__init__.py (100%) create mode 100644 backend/SC4SNMP_UI_backend/apply_changes/apply_changes.py create mode 100644 backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py create mode 100644 backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py create mode 100644 backend/SC4SNMP_UI_backend/apply_changes/kubernetes_job.py create mode 100644 backend/SC4SNMP_UI_backend/apply_changes/routes.py 
create mode 100644 backend/SC4SNMP_UI_backend/apply_changes/tasks.py create mode 100644 backend/SC4SNMP_UI_backend/common/backend_ui_conversions.py delete mode 100644 backend/SC4SNMP_UI_backend/common/conversions.py create mode 100644 backend/SC4SNMP_UI_backend/common/inventory_utils.py create mode 100644 backend/SC4SNMP_UI_backend/groups/__init__.py create mode 100644 backend/SC4SNMP_UI_backend/groups/routes.py create mode 100644 backend/SC4SNMP_UI_backend/inventory/__init__.py create mode 100644 backend/SC4SNMP_UI_backend/inventory/routes.py create mode 100644 backend/SC4SNMP_UI_backend/profiles/__init__.py create mode 100644 backend/SC4SNMP_UI_backend/profiles/routes.py delete mode 100644 backend/SC4SNMP_UI_backend/ui_handling/helpers.py delete mode 100644 backend/SC4SNMP_UI_backend/ui_handling/routes.py create mode 100644 backend/celery_start.sh create mode 100644 backend/flask_start.sh delete mode 100644 backend/package-lock.json delete mode 100644 backend/package.json create mode 100644 backend/tests/common/__init__.py rename backend/tests/common/{test_conversions.py => test_backend_ui_conversions.py} (66%) create mode 100644 backend/tests/ui_handling/__init__.py create mode 100644 backend/tests/ui_handling/create_job_object.py create mode 100644 backend/tests/ui_handling/get_endpoints/__init__.py rename backend/tests/ui_handling/{ => get_endpoints}/test_get_endpoints.py (88%) create mode 100644 backend/tests/ui_handling/post_endpoints/__init__.py create mode 100644 backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py create mode 100644 backend/tests/ui_handling/post_endpoints/test_post_groups.py create mode 100644 backend/tests/ui_handling/post_endpoints/test_post_inventory.py create mode 100644 backend/tests/ui_handling/post_endpoints/test_post_profiles.py delete mode 100644 backend/tests/ui_handling/test_post_endpoints.py create mode 100644 backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml create mode 100644 backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml create mode 100644 backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml create mode 100644 backend/tests/yamls_for_tests/reference_files/values.yaml create mode 100644 backend/tests/yamls_for_tests/values_test/.gitignore create mode 100644 backend/tests/yamls_for_tests/values_test/values-before-edit.yaml create mode 100644 backend/tests/yamls_for_tests/values_test/values.yaml delete mode 100644 frontend/packages/manager/src/components/ButtonsModal.jsx create mode 100644 frontend/packages/manager/src/components/profiles/Condition.jsx create mode 100644 frontend/packages/manager/src/components/profiles/Conditional.jsx create mode 100644 frontend/packages/manager/src/components/profiles/ConditionalIn.jsx delete mode 100644 frontend/packages/manager/src/components/profiles/Conditions.jsx create mode 100644 frontend/packages/manager/src/components/profiles/FieldPatterns.jsx delete mode 100644 frontend/packages/manager/src/components/profiles/PatternsCreator.jsx create mode 100644 frontend/packages/manager/src/components/profiles/VarBinds.jsx delete mode 100644 frontend/packages/manager/src/components/profiles/VarbindsCreator.jsx create mode 100644 frontend/packages/manager/src/components/validation/ValidationGroup.jsx create mode 100644 frontend/packages/manager/src/tests/AddDeviceModal.test.jsx create mode 100644 frontend/packages/manager/src/tests/AddGroupModal.test.jsx create mode 100644 frontend/packages/manager/src/tests/AddInventoryModal.test.jsx create mode 
100644 frontend/packages/manager/src/tests/AddProfileModal.test.jsx delete mode 100644 frontend/packages/manager/src/tests/Manager.unit.jsx create mode 100644 frontend/packages/manager/src/tests/custom_testing_lib/custom-queries.jsx create mode 100644 frontend/packages/manager/src/tests/custom_testing_lib/custom-testing-lib.jsx create mode 100644 frontend/packages/manager/src/tests/mock_context_providers/MockErrorsContextProvider.jsx create mode 100644 frontend/packages/manager/src/tests/mock_context_providers/MockGroupContextProvider.jsx create mode 100644 frontend/packages/manager/src/tests/mock_context_providers/MockInventoryContextProvider.jsx create mode 100644 frontend/packages/manager/src/tests/mock_context_providers/MockInventoryValidationContextProvider.jsx create mode 100644 frontend/packages/manager/src/tests/mock_context_providers/MockProfileContextProvider.jsx create mode 100644 frontend/packages/manager/src/tests/mock_context_providers/MockProfileValidationContextProvider.jsx diff --git a/.github/workflows/ci-build-backend.yaml b/.github/workflows/ci-build-backend.yaml deleted file mode 100644 index 9641d2e..0000000 --- a/.github/workflows/ci-build-backend.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# ######################################################################## -# Copyright 2021 Splunk Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ######################################################################## - -name: ci-build-backend -on: - pull_request: - branches: - - "main" - - "develop" - - "next" - push: - branches: - - "main" - - "develop" - - "next" - tags-ignore: - - "v*" - -jobs: - release: - name: Release - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - submodules: false - persist-credentials: false - - #Build docker images - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - name: Login to GitHub Packages Docker Registry - uses: docker/login-action@v1.9.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Docker meta - id: docker_meta - uses: docker/metadata-action@v3 - with: - images: ghcr.io/splunk/sc4snmp-ui/backend/container - tags: | - type=semver,pattern=v{{major}}.{{minor}} - type=semver,pattern=v{{major}} - type=semver,pattern=v{{version}} - type=semver,pattern={{major}}.{{minor}} - type=semver,pattern={{major}} - type=semver,pattern={{version}} - type=ref,event=branch - type=ref,event=pr - - name: Build and push action - backend - id: docker_action_build_backend - uses: docker/build-push-action@v2 - with: - context: backend - push: true - platforms: linux/amd64,linux/arm64 - tags: ${{ steps.docker_meta.outputs.tags }} - labels: ${{ steps.docker_meta.outputs.labels }} - cache-to: type=inline - - uses: actions/setup-node@v2 - with: - node-version: "14" - diff --git a/.github/workflows/ci-build-frontend.yaml b/.github/workflows/ci-build.yaml similarity index 57% rename from .github/workflows/ci-build-frontend.yaml rename to .github/workflows/ci-build.yaml index f9982d9..ed2b9b0 100644 --- a/.github/workflows/ci-build-frontend.yaml +++ b/.github/workflows/ci-build.yaml @@ -14,7 +14,7 @@ # limitations under the License. 
# ######################################################################## -name: ci-build-frontend +name: ci-build on: pull_request: branches: @@ -30,9 +30,12 @@ on: - "v*" jobs: - release: - name: Release + build-frontend: + name: build-frontend runs-on: ubuntu-latest + permissions: + contents: read + packages: write steps: - uses: actions/checkout@v2 with: @@ -50,7 +53,7 @@ jobs: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Docker meta + - name: Docker meta - frontend id: docker_meta uses: docker/metadata-action@v3 with: @@ -63,13 +66,12 @@ jobs: type=semver,pattern={{major}} type=semver,pattern={{version}} type=ref,event=branch - type=ref,event=pr - name: Build and push action - frontend id: docker_action_build_frontend uses: docker/build-push-action@v2 with: context: frontend - push: true + push: false platforms: linux/amd64,linux/arm64 tags: ${{ steps.docker_meta.outputs.tags }} labels: ${{ steps.docker_meta.outputs.labels }} @@ -78,3 +80,52 @@ jobs: with: node-version: "14" + build-backend: + name: build-backend + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v2 + with: + submodules: false + persist-credentials: false + + #Build docker images + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Login to GitHub Packages Docker Registry + uses: docker/login-action@v1.9.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Docker meta - backend + id: docker_meta + uses: docker/metadata-action@v3 + with: + images: ghcr.io/splunk/sc4snmp-ui/backend/container + tags: | + type=semver,pattern=v{{major}}.{{minor}} + type=semver,pattern=v{{major}} + type=semver,pattern=v{{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + type=semver,pattern={{version}} + type=ref,event=branch + - name: Build and push action - backend + id: docker_action_build_backend + uses: docker/build-push-action@v2 + with: + context: backend + push: false + platforms: linux/amd64,linux/arm64 + tags: ${{ steps.docker_meta.outputs.tags }} + labels: ${{ steps.docker_meta.outputs.labels }} + cache-to: type=inline + - uses: actions/setup-node@v2 + with: + node-version: "14" diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml new file mode 100644 index 0000000..2107136 --- /dev/null +++ b/.github/workflows/ci-main.yaml @@ -0,0 +1,80 @@ +# ######################################################################## +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ######################################################################## + +name: ci-main +on: + push: + branches: + - "main" + - "develop" + - "next" + tags-ignore: + - "v*" + pull_request: + branches: + - "main" + - "develop" + - "next" +jobs: + test-unit-backend: + name: Test Backend Unit Python ${{ matrix.python-version }} + runs-on: ubuntu-latest + env: + VALUES_DIRECTORY: /tmp + strategy: + matrix: + python-version: + - 3.9 + steps: + - uses: actions/checkout@v3 + - name: Setup python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install packages + working-directory: ./backend + run: pip install -r ./requirements.txt + - name: Run Pytest + working-directory: ./backend + run: pytest + test-unit-frontned: + name: Test Frontend Unit Node ${{ matrix.node-version }} + runs-on: ubuntu-latest + strategy: + matrix: + node-version: + - 16 + steps: + - uses: actions/checkout@v3 + - name: Set Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v3 + with: + node-version: ${{ matrix.node-version }} + - name: Run install + uses: borales/actions-yarn@v4 + with: + cmd: install + dir: 'frontend' + - name: Build + uses: borales/actions-yarn@v4 + with: + cmd: build + dir: 'frontend' + - name: Run test in sub-folder + uses: borales/actions-yarn@v4 + with: + cmd: test + dir: 'frontend' \ No newline at end of file diff --git a/.github/workflows/ci-release.yaml b/.github/workflows/ci-release.yaml new file mode 100644 index 0000000..10b5c30 --- /dev/null +++ b/.github/workflows/ci-release.yaml @@ -0,0 +1,153 @@ +# ######################################################################## +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ######################################################################## + +name: ci-release +on: + push: + branches: + - "main" + - "develop" + - "next" + tags-ignore: + - "v*" + +jobs: + build-frontend: + name: build-frontend + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v2 + with: + submodules: false + persist-credentials: false + + #Build docker images + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Login to GitHub Packages Docker Registry + uses: docker/login-action@v1.9.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Docker meta - frontend + id: docker_meta + uses: docker/metadata-action@v3 + with: + images: ghcr.io/splunk/sc4snmp-ui/frontend/container + tags: | + type=semver,pattern=v{{major}}.{{minor}} + type=semver,pattern=v{{major}} + type=semver,pattern=v{{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + type=semver,pattern={{version}} + type=ref,event=branch + - name: Build and push action - frontend + id: docker_action_build_frontend + uses: docker/build-push-action@v2 + with: + context: frontend + push: true + platforms: linux/amd64,linux/arm64 + tags: ${{ steps.docker_meta.outputs.tags }} + labels: ${{ steps.docker_meta.outputs.labels }} + cache-to: type=inline + - uses: actions/setup-node@v2 + with: + node-version: "14" + + build-backend: + name: build-backend + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v2 + with: + submodules: false + persist-credentials: false + + #Build docker images + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Login to GitHub Packages Docker Registry + uses: docker/login-action@v1.9.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Docker meta - backend + id: docker_meta + uses: docker/metadata-action@v3 + with: + images: ghcr.io/splunk/sc4snmp-ui/backend/container + tags: | + type=semver,pattern=v{{major}}.{{minor}} + type=semver,pattern=v{{major}} + type=semver,pattern=v{{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + type=semver,pattern={{version}} + type=ref,event=branch + - name: Build and push action - backend + id: docker_action_build_backend + uses: docker/build-push-action@v2 + with: + context: backend + push: true + platforms: linux/amd64,linux/arm64 + tags: ${{ steps.docker_meta.outputs.tags }} + labels: ${{ steps.docker_meta.outputs.labels }} + cache-to: type=inline + - uses: actions/setup-node@v2 + with: + node-version: "14" + release: + name: Release + needs: [build-frontend, build-backend] + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v2 + with: + submodules: false + persist-credentials: false + - uses: actions/setup-node@v2 + with: + node-version: "14" + - name: Semantic Release + id: version + uses: cycjimmy/semantic-release-action@v3.2.0 + with: + semantic_version: 17 + extra_plugins: | + @semantic-release/exec + @semantic-release/git + @google/semantic-release-replace-plugin@1.2.0 + env: + GITHUB_TOKEN: ${{ secrets.GH_TOKEN_ADMIN }} + diff --git a/.releaserc b/.releaserc new file mode 100644 index 0000000..1b8fd16 --- /dev/null +++ b/.releaserc @@ -0,0 +1,86 @@ 
+# +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +{ + "branches": + [ + "+([0-9])?(.{+([0-9]),x}).x", + "main", + { name: "next", channel: "alpha", prerelease: "alpha" }, + { name: "develop", channel: "beta", prerelease: "beta" }, + ], + plugins: + [ + "@semantic-release/commit-analyzer", + [ + "@google/semantic-release-replace-plugin", + { + "replacements": [ + { + "files": ["backend/SC4SNMP_UI_backend/__init__.py"], + "from": "__version__ ?=.*", + "to": "__version__ = \"${nextRelease.version}\"", + "results": [ + { + "file": "backend/SC4SNMP_UI_backend/__init__.py", + "hasChanged": true, + "numMatches": 1, + "numReplacements": 1 + } + ], + "countMatches": true + }, + { + "files": ["frontend/packages/manager/package.json"], + "from": ".*\"version\":.*", + "to": " \"version\": \"${nextRelease.version}\",", + "results": [ + { + "file": "frontend/packages/manager/package.json", + "hasChanged": true, + "numMatches": 1, + "numReplacements": 1 + } + ], + "countMatches": true + }, + { + "files": ["frontend/lerna.json"], + "from": ".*\"version\":.*", + "to": " \"version\": \"${nextRelease.version}\",", + "results": [ + { + "file": "frontend/lerna.json", + "hasChanged": true, + "numMatches": 1, + "numReplacements": 1 + } + ], + "countMatches": true + } + ] + } + ], + "@semantic-release/release-notes-generator", + [ + "@semantic-release/git", + { + "assets": ["NOTICE", "frontend/lerna.json", "frontend/packages/manager/package.json", "backend/SC4SNMP_UI_backend/__init__.py"], + "message": "chore(release): ${nextRelease.version}\n\n${nextRelease.notes}", + }, + ], + ["@semantic-release/github", { "assets": ["NOTICE"] }], + ], +} diff --git a/backend/Dockerfile b/backend/Dockerfile index 49eb52e..d1d734c 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -6,5 +6,14 @@ COPY SC4SNMP_UI_backend ./SC4SNMP_UI_backend RUN pip install -r ./requirements.txt ENV FLASK_DEBUG production + +COPY ./flask_start.sh /flask_start.sh +RUN chmod +x /flask_start.sh + +COPY ./celery_start.sh /celery_start.sh +RUN chmod +x /celery_start.sh + +USER 10000:10000 + EXPOSE 5000 -CMD ["gunicorn", "-b", ":5000", "app:app", "--log-level", "DEBUG"] \ No newline at end of file +CMD ["gunicorn", "-b", ":5000", "app:flask_app", "--log-level", "INFO"] \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/__init__.py b/backend/SC4SNMP_UI_backend/__init__.py index abcd480..bda44d3 100644 --- a/backend/SC4SNMP_UI_backend/__init__.py +++ b/backend/SC4SNMP_UI_backend/__init__.py @@ -2,25 +2,63 @@ from pymongo import MongoClient import os import logging +from celery import Celery +from celery import Task +from dotenv import load_dotenv -try: - from dotenv import load_dotenv +load_dotenv() - load_dotenv() -except: - pass +__version__ = "1.0.0-beta.18" MONGO_URI = os.getenv("MONGO_URI") mongo_client = MongoClient(MONGO_URI) +CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "amqp://guest:guest@localhost:5672//") +REDIS_URL = os.getenv("REDIS_URL") +VALUES_DIRECTORY = 
os.getenv("VALUES_DIRECTORY", "") +KEEP_TEMP_FILES = os.getenv("KEEP_TEMP_FILES", "false") +class NoValuesDirectoryException(Exception): + pass def create_app(): + if len(VALUES_DIRECTORY) == 0: + raise NoValuesDirectoryException + app = Flask(__name__) - from SC4SNMP_UI_backend.ui_handling.routes import ui - app.register_blueprint(ui) + app.config.from_mapping( + CELERY=dict( + task_default_queue="apply_changes", + broker_url=CELERY_BROKER_URL, + beat_scheduler="redbeat.RedBeatScheduler", + redbeat_redis_url = REDIS_URL, + task_ignore_result=True, + redbeat_lock_key=None, + ), + ) + celery_init_app(app) + from SC4SNMP_UI_backend.profiles.routes import profiles_blueprint + from SC4SNMP_UI_backend.groups.routes import groups_blueprint + from SC4SNMP_UI_backend.inventory.routes import inventory_blueprint + from SC4SNMP_UI_backend.apply_changes.routes import apply_changes_blueprint + app.register_blueprint(profiles_blueprint) + app.register_blueprint(groups_blueprint) + app.register_blueprint(inventory_blueprint) + app.register_blueprint(apply_changes_blueprint) gunicorn_logger = logging.getLogger('gunicorn.error') app.logger.handlers = gunicorn_logger.handlers app.logger.setLevel(gunicorn_logger.level) return app + +def celery_init_app(app: Flask) -> Celery: + class FlaskTask(Task): + def __call__(self, *args: object, **kwargs: object) -> object: + with app.app_context(): + return self.run(*args, **kwargs) + + celery_app = Celery(app.name, task_cls=FlaskTask) + celery_app.config_from_object(app.config["CELERY"]) + celery_app.set_default() + app.extensions["celery"] = celery_app + return celery_app diff --git a/backend/SC4SNMP_UI_backend/ui_handling/__init__.py b/backend/SC4SNMP_UI_backend/apply_changes/__init__.py similarity index 100% rename from backend/SC4SNMP_UI_backend/ui_handling/__init__.py rename to backend/SC4SNMP_UI_backend/apply_changes/__init__.py diff --git a/backend/SC4SNMP_UI_backend/apply_changes/apply_changes.py b/backend/SC4SNMP_UI_backend/apply_changes/apply_changes.py new file mode 100644 index 0000000..b220f9d --- /dev/null +++ b/backend/SC4SNMP_UI_backend/apply_changes/apply_changes.py @@ -0,0 +1,69 @@ +from threading import Lock +import os +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.apply_changes.handling_chain import CheckJobHandler, ScheduleHandler, SaveConfigToFileHandler +from SC4SNMP_UI_backend.apply_changes.config_to_yaml_utils import ProfilesToYamlDictConversion, ProfilesTempHandling, \ + GroupsToYamlDictConversion, GroupsTempHandling, InventoryToYamlDictConversion, InventoryTempHandling + + +MONGO_URI = os.getenv("MONGO_URI") +JOB_CREATION_RETRIES = int(os.getenv("JOB_CREATION_RETRIES", 10)) +mongo_config_collection = mongo_client.sc4snmp.config_collection +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui +mongo_profiles = mongo_client.sc4snmp.profiles_ui + + + +class SingletonMeta(type): + _instances = {} + _lock: Lock = Lock() + + def __call__(cls, *args, **kwargs): + with cls._lock: + if cls not in cls._instances: + instance = super().__call__(*args, **kwargs) + cls._instances[cls] = instance + return cls._instances[cls] + +class ApplyChanges(metaclass=SingletonMeta): + def __init__(self) -> None: + """ + ApplyChanges is a singleton responsible for creating mongo record with a current state of kubernetes job. 
+ Structure of the record: + { + "previous_job_start_time": datetime.datetime or None if job hasn't been scheduled yet, + "currently_scheduled": bool + } + """ + self.__handling_chain = SaveConfigToFileHandler() + check_job_handler = CheckJobHandler() + schedule_handler = ScheduleHandler() + self.__handling_chain.set_next(check_job_handler).set_next(schedule_handler) + mongo_config_collection.update_one( + { + "previous_job_start_time": {"$exists": True}, + "currently_scheduled": {"$exists": True}} + ,{ + "$set":{ + "previous_job_start_time": None, + "currently_scheduled": False + } + }, + upsert=True + ) + + + def apply_changes(self): + """ + Run the chain of actions to schedule a new kubernetes job. + """ + yaml_sections = { + "scheduler.groups": (mongo_groups, GroupsToYamlDictConversion, GroupsTempHandling), + "scheduler.profiles": (mongo_profiles, ProfilesToYamlDictConversion, ProfilesTempHandling), + "poller.inventory": (mongo_inventory, InventoryToYamlDictConversion, InventoryTempHandling) + } + return self.__handling_chain.handle({ + "yaml_sections": yaml_sections + }) + diff --git a/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py b/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py new file mode 100644 index 0000000..2ed1900 --- /dev/null +++ b/backend/SC4SNMP_UI_backend/apply_changes/config_to_yaml_utils.py @@ -0,0 +1,232 @@ +from abc import abstractmethod +import ruamel +from ruamel.yaml.scalarstring import SingleQuotedScalarString as single_quote +from ruamel.yaml.scalarstring import DoubleQuotedScalarString as double_quote +from SC4SNMP_UI_backend.common.backend_ui_conversions import get_group_or_profile_name_from_backend +from ruamel.yaml.scalarstring import LiteralScalarString as literal_string +import os +from flask import current_app + + +def bool_to_str(value): + if value: + return "t" + else: + return "f" + + +class MongoToYamlDictConversion: + """ + MongoToYamlDictConversion is an abstract class. Implementations of this class convert + appropriate mongo collections to dictionaries in such a way that configurations from those collections can be + dumped to a yaml file with appropriate formatting.
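For illustration, the flow-style rendering this enables (a minimal sketch using the same ruamel API as yaml_escape_list below):

    import sys
    import ruamel.yaml
    from ruamel.yaml.scalarstring import SingleQuotedScalarString as single_quote

    yaml = ruamel.yaml.YAML()
    vb = ruamel.yaml.comments.CommentedSeq([single_quote("IF-MIB"), single_quote("ifDescr")])
    vb.fa.set_flow_style()  # render this sequence inline instead of one item per line
    yaml.dump({"varBinds": [vb]}, sys.stdout)
    # expected output, roughly:
    # varBinds:
    # - ['IF-MIB', 'ifDescr']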
+ """ + @classmethod + def yaml_escape_list(cls, *l): + """ + This function is used to parse an example list [yaml_escape_list(el1, el2, el3)] like this: + - [el1, el2, el3] + and not like this: + - el1 + - el2 + - el3 + """ + ret = ruamel.yaml.comments.CommentedSeq(l) + ret.fa.set_flow_style() + return ret + @abstractmethod + def convert(self, documents: list) -> dict: + pass + + +class ProfilesToYamlDictConversion(MongoToYamlDictConversion): + def convert(self, documents: list) -> dict: + """ + ProfilesToYamlDictConversion converts profiles from mongo collection to + format that can be dumped to yaml file + :param documents: list of profiles from mongo + :return: dictionary that can be dumped to yaml + """ + result = {} + for profile in documents: + profile_name = get_group_or_profile_name_from_backend(profile) + prof = profile[profile_name] + var_binds = [] + condition = None + conditions = None + is_walk_profile = False + + for var_bind in prof["varBinds"]: + var_binds.append(self.yaml_escape_list(*[single_quote(vb) for vb in var_bind])) + + if "condition" in prof: + backend_condition = prof["condition"] + condition_type = backend_condition["type"] + is_walk_profile = True if backend_condition["type"] == "walk" else False + condition = { + "type": condition_type + } + if condition_type == "field": + condition["field"] = backend_condition["field"] + condition["patterns"] = [single_quote(pattern) for pattern in backend_condition["patterns"]] + + if "conditions" in prof: + backend_conditions = prof["conditions"] + conditions = [] + for cond in backend_conditions: + if cond["operation"] == "in": + value = [double_quote(v) if type(v) == str else v for v in cond["value"]] + else: + value = double_quote(cond["value"]) if type(cond["value"]) == str else cond["value"] + conditions.append({ + "field": cond["field"], + "operation": double_quote(cond["operation"]), + "value": value + }) + + result[profile_name] = {} + if not is_walk_profile: + result[profile_name]["frequency"] = prof['frequency'] + if condition is not None: + result[profile_name]["condition"] = condition + if conditions is not None: + result[profile_name]["conditions"] = conditions + result[profile_name]["varBinds"] = var_binds + + return result + + +class GroupsToYamlDictConversion(MongoToYamlDictConversion): + def convert(self, documents: list) -> dict: + """ + GroupsToYamlDictConversion converts groups from mongo collection to + format that can be dumped to yaml file + :param documents: list of groups from mongo + :return: dictionary that can be dumped to yaml + """ + result = {} + for group in documents: + group_name = get_group_or_profile_name_from_backend(group) + gr = group[group_name] + hosts = [] + for host in gr: + host_config = host + if "community" in host: + host_config["community"] = single_quote(host["community"]) + if "secret" in host: + host_config["secret"] = single_quote(host["secret"]) + if "version" in host: + host_config["version"] = single_quote(host["version"]) + hosts.append(host_config) + result[group_name] = hosts + return result + + +class InventoryToYamlDictConversion(MongoToYamlDictConversion): + def convert(self, documents: list) -> dict: + """ + InventoryToYamlDictConversion converts inventory from mongo collection to + format that can be dumped to yaml file + :param documents: inventory from mongo + :return: dictionary that can be dumped to yaml + """ + inventory_string = "address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete" + for inv in documents: + 
smart_profiles = bool_to_str(inv['smart_profiles']) + inv_delete = bool_to_str(inv['delete']) + inventory_string += f"\n{inv['address']},{inv['port']},{inv['version']},{inv['community']}," \ + f"{inv['secret']},{inv['security_engine']},{inv['walk_interval']},{inv['profiles']}," \ + f"{smart_profiles},{inv_delete}" + return { + "inventory": literal_string(inventory_string) + } + + +class TempFileHandling: + """ + After converting configurations from mongo to dictionaries ready to be dumped to yaml file, those dictionaries + must be dumped to temporary files. This is because those configurations must be parsed before they are inserted + to values.yaml file. TempFileHandling is an abstract class whose implementations parse dictionaries and return + ready configuration that can be saved in values.yaml + """ + def __init__(self, file_path: str): + self._file_path = file_path + + def _save_temp(self, content): + yaml = ruamel.yaml.YAML() + with open(self._file_path, "w") as file: + yaml.dump(content, file) + + def _delete_temp(self): + if os.path.exists(self._file_path): + os.remove(self._file_path) + else: + current_app.logger.info(f"Directory {self._file_path} doesn't exist inside a Pod. File wasn't removed.") + + @abstractmethod + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + pass + + +class ProfilesTempHandling(TempFileHandling): + def __init__(self, file_path: str): + super().__init__(file_path) + + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with profiles configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ + self._save_temp(document) + lines = "" + with open(self._file_path, "r") as file: + line = file.readline() + while line != "": + lines += line + line = file.readline() + if delete_tmp: + self._delete_temp() + return literal_string(lines) + + +class InventoryTempHandling(TempFileHandling): + def __init__(self, file_path: str): + super().__init__(file_path) + + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with inventory configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ + self._save_temp(document) + yaml = ruamel.yaml.YAML() + with open(self._file_path, "r") as file: + inventory = yaml.load(file) + result = inventory["inventory"] + if delete_tmp: + self._delete_temp() + return literal_string(result) + + +class GroupsTempHandling(TempFileHandling): + def __init__(self, file_path: str): + super().__init__(file_path) + + def parse_dict_to_yaml(self, document: dict, delete_tmp: bool = True): + """ + :param document: dictionary with groups configuration + :param delete_tmp: whether to delete temporary file after parsing + :return: parsed configuration ready to be saved to values.yaml + """ + self._save_temp(document) + lines = "" + with open(self._file_path, "r") as file: + line = file.readline() + while line != "": + lines += line + line = file.readline() + if delete_tmp: + self._delete_temp() + return literal_string(lines) diff --git a/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py b/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py new file mode 100644 index 0000000..fd8e51a --- /dev/null +++ b/backend/SC4SNMP_UI_backend/apply_changes/handling_chain.py @@ -0,0 +1,169 @@ +from abc import abstractmethod, ABC +import 
ruamel.yaml +from flask import current_app +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.apply_changes.tasks import run_job +import datetime +import os + + +CHANGES_INTERVAL_SECONDS = 300 +TMP_FILE_PREFIX = "sc4snmp_ui_" +TMP_DIR = "/tmp" +VALUES_DIRECTORY = os.getenv("VALUES_DIRECTORY", "") +VALUES_FILE = os.getenv("VALUES_FILE", "") +KEEP_TEMP_FILES = os.getenv("KEEP_TEMP_FILES", "false") +mongo_config_collection = mongo_client.sc4snmp.config_collection +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui +mongo_profiles = mongo_client.sc4snmp.profiles_ui + +class Handler(ABC): + @abstractmethod + def set_next(self, handler): + pass + + @abstractmethod + def handle(self, request): + pass + + +class AbstractHandler(Handler): + _next_handler: Handler = None + + def set_next(self, handler: Handler) -> Handler: + self._next_handler = handler + return handler + + @abstractmethod + def handle(self, request: dict): + if self._next_handler: + return self._next_handler.handle(request) + return None + + +class SaveConfigToFileHandler(AbstractHandler): + def handle(self, request: dict): + """ + SaveConfigToFileHandler saves current configuration of profiles, groups and inventory from mongo + to files on the host machine. + + :param request: dictionary with at least one key "yaml_sections". Under this key there should be dictionary + with the following structure + { + "key.to.section": (mongo_collection, MongoToYamlDictConversion, TempFileHandling) + } + where: + - "key.to.section": a key to section of values.yaml file that should be updated (e.g. "scheduler.profiles") + - mongo_collection: mongo collection with configuration of given section + - MongoToYamlDictConversion: implementation of this abstract class + - TempFileHandling: implementation of this abstract class + """ + + yaml = ruamel.yaml.YAML() + values_file_resolved = True + values_file_path = os.path.join(VALUES_DIRECTORY, VALUES_FILE) + + if len(VALUES_FILE) == 0 or (VALUES_FILE.split(".")[1] != "yaml" and VALUES_FILE.split(".")[1] != "yml") or \ + not os.path.exists(os.path.join(VALUES_DIRECTORY, VALUES_FILE)): + # If VALUES_FILE can't be found or wasn't provided, it won't be updated. In this case separate files + # with configuration of specific section will be saved in the hosts machine. 
+ values_file_resolved = False + values = {} + if values_file_resolved: + with open(values_file_path, "r") as file: + values = yaml.load(file) + + if not values_file_resolved or KEEP_TEMP_FILES.lower() in ["t", "true", "y", "yes", "1"]: + delete_temp_files = False + else: + delete_temp_files = True + + for key, value in request["yaml_sections"].items(): + tmp_file_name = TMP_FILE_PREFIX + key.replace(".", "_") + ".yaml" + directory = VALUES_DIRECTORY if not delete_temp_files else TMP_DIR + tmp_file_path = os.path.join(directory, tmp_file_name) + + mongo_collection = value[0] + mongo_to_yaml_conversion = value[1]() + tmp_file_handling = value[2](tmp_file_path) + + documents = list(mongo_collection.find()) + converted = mongo_to_yaml_conversion.convert(documents) + parsed_values = tmp_file_handling.parse_dict_to_yaml(converted, delete_temp_files) + + # update appropriate section values dictionary + values_keys = key.split(".") + sub_dict = values + for value_index, value_key in enumerate(values_keys): + if value_index == len(values_keys)-1: + sub_dict[value_key] = parsed_values + else: + sub_dict = sub_dict.get(value_key, {}) + + if values_file_resolved: + with open(values_file_path, "w") as file: + yaml.dump(values, file) + + next_chain_request = {} + if "next" in request: + next_chain_request = request["next"] + return super().handle(next_chain_request) + + +class CheckJobHandler(AbstractHandler): + def handle(self, request: dict = None): + """ + CheckJobHandler checks whether a new kubernetes job with updated sc4snmp configuration can be run immediately + or should it be scheduled for the future. + + :return: pass dictionary with job_delay in seconds to the next handler + """ + record = list(mongo_config_collection.find())[0] + last_update = record["previous_job_start_time"] + if last_update is None: + # If it's the first time that the job is run (record in mongo_config_collection has been created + # in ApplyChanges class and last_update attribute is None) then job delay should be equal to + # CHANGES_INTERVAL_SECONDS. Update the mongo record with job state accordingly. + job_delay = CHANGES_INTERVAL_SECONDS + mongo_config_collection.update_one({"_id": record["_id"]}, + {"$set": {"previous_job_start_time": datetime.datetime.utcnow()}}) + # time from the last update + time_difference = 0 + else: + # Check how many seconds have elapsed since the last time that the job was run. If the time difference + # is greater than CHANGES_INTERVAL_SECONDS then job can be run immediately. Otherwise, calculate how + # many seconds are left until minimum time difference between updates (CHANGES_INTERVAL_SECONDS). + current_time = datetime.datetime.utcnow() + delta = current_time - last_update + time_difference = delta.total_seconds() + if time_difference > CHANGES_INTERVAL_SECONDS: + job_delay = 1 + else: + job_delay = int(CHANGES_INTERVAL_SECONDS - time_difference) + + result = { + "job_delay": job_delay, + "time_from_last_update": time_difference + } + + current_app.logger.info(f"CheckJobHandler: {result}") + return super().handle(result) + + +class ScheduleHandler(AbstractHandler): + def handle(self, request: dict): + """ + ScheduleHandler schedules the kubernetes job with updated sc4snmp configuration + """ + record = list(mongo_config_collection.find())[0] + if not record["currently_scheduled"]: + # If the task isn't currently scheduled, schedule it and update its state in mongo. 
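# Illustration of the numbers flowing into this branch (assuming CHANGES_INTERVAL_SECONDS = 300):
# request['job_delay'] comes from CheckJobHandler above. On the very first run it is 300; when more
# than 300s have passed since the previous job it is 1, so the task fires almost immediately;
# otherwise it is the remainder, e.g. 120s elapsed -> int(300 - 120) == 180, and
# run_job.apply_async(countdown=180, queue='apply_changes') starts the celery task in ~3 minutes.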
+ mongo_config_collection.update_one({"_id": record["_id"]}, + {"$set": {"currently_scheduled": True}}) + run_job.apply_async(countdown=request["job_delay"], queue='apply_changes') + current_app.logger.info( + f"ScheduleHandler: scheduling new task with the delay of {request['job_delay']} seconds.") + else: + current_app.logger.info("ScheduleHandler: new job wasn't scheduled.") + return request["job_delay"], record["currently_scheduled"] \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/apply_changes/kubernetes_job.py b/backend/SC4SNMP_UI_backend/apply_changes/kubernetes_job.py new file mode 100644 index 0000000..147d410 --- /dev/null +++ b/backend/SC4SNMP_UI_backend/apply_changes/kubernetes_job.py @@ -0,0 +1,189 @@ +from kubernetes import client +from copy import copy +from celery.utils.log import get_task_logger + +logger = get_task_logger(__name__) + +# Functions in this file create different sections of kubernetes job, +# based on job config yaml file from splunk-connect-for-snmp. + +def create_container(container: dict): + """ + Create a container object from yaml configuration. + + :param container: Parsed yaml configuration of a single container from + spec.template.spec.containers section. + :return: V1Container + """ + name = container["name"] + image = container["image"] + image_pull_policy = container["imagePullPolicy"] + args = container["args"] + env = [] + for e in container["env"]: + env_var = client.V1EnvVar(name=e["name"], + value=e["value"]) + env.append(copy(env_var)) + volume_mounts = [] + for v in container["volumeMounts"]: + vol = client.V1VolumeMount(name=v["name"], + mount_path=v["mountPath"], + read_only=v["readOnly"]) + volume_mounts.append(copy(vol)) + container_object = client.V1Container(name=name, + image=image, + image_pull_policy=image_pull_policy, + args=args, + env=env, + volume_mounts=volume_mounts) + return container_object + + +def create_volume(volume: dict): + """ + Create a volume object from yaml configuration. + + :param volume: Parsed yaml configuration of a single volume from + spec.template.spec.volumes section. + :return: V1Volume + """ + name = volume["name"] + if "configMap" in volume: + config_map_name = volume["configMap"]["name"] + items = [] + for i in volume["configMap"]["items"]: + item = client.V1KeyToPath(key=i["key"], + path=i["path"]) + items.append(copy(item)) + config_map = client.V1ConfigMapVolumeSource(name=config_map_name, + items=items) + volume_object = client.V1Volume(name=name, + config_map=config_map) + else: + volume_object = client.V1Volume(name=name, + empty_dir=client.V1EmptyDirVolumeSource()) + return volume_object + + +def create_pod_spec(spec: dict): + """ + Create spec.template.spec section of a kubernetes job. 
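The spec dictionary is expected to mirror spec.template.spec of the job config yaml from splunk-connect-for-snmp once parsed; only the keys read below matter, e.g. (hypothetical values):

    {
        "containers": [{"name": "...", "image": "...", "imagePullPolicy": "...",
                        "args": [...], "env": [...], "volumeMounts": [...]}],
        "volumes": [{"name": "...", "configMap": {...}}],
        "restartPolicy": "Never",
        "imagePullSecrets": [{"name": "..."}]   # optional
    }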
+ + :param spec: Parsed yaml spec.template.spec configuration + :return: V1PodSpec + """ + containers = [create_container(c) for c in spec["containers"]] + volumes = [create_volume(v) for v in spec["volumes"]] + restart_policy = spec["restartPolicy"] + secrets = None + if "imagePullSecrets" in spec: + secrets = [] + for secret in spec["imagePullSecrets"]: + new_secret = client.V1LocalObjectReference(name=secret["name"]) + secrets.append(new_secret) + if secrets is not None: + spec_object = client.V1PodSpec(containers=containers, + volumes=volumes, + restart_policy=restart_policy, + image_pull_secrets=secrets) + else: + spec_object = client.V1PodSpec(containers=containers, + volumes=volumes, + restart_policy=restart_policy) + return spec_object + + +def create_pod_metadata(metadata: dict): + """ + Create spec.template.metadata section of a kubernetes job. + + :param metadata: Parsed yaml spec.template.metadata configuration + :return: V1ObjectMeta + """ + labels = {} + for key, value in metadata["labels"].items(): + labels[key] = value + annotations = None + if "annotations" in metadata: + annotations = {} + for key, value in metadata["annotations"].items(): + annotations[key] = value + if annotations is not None: + metadata_object = client.V1ObjectMeta(annotations=annotations, labels=labels) + else: + metadata_object = client.V1ObjectMeta(labels=labels) + return metadata_object + + +def create_pod_template(pod_template: dict): + """ + Create spec.template section of a kubernetes job. + + :param pod_template: Parsed yaml spec.template configuration + :return: V1PodTemplateSpec + """ + metadata = create_pod_metadata(pod_template["metadata"]) + spec = create_pod_spec(pod_template["spec"]) + template_object = client.V1PodTemplateSpec(metadata=metadata, + spec=spec) + return template_object + + +def create_job_spec(spec: dict): + """ + Create spec section of a kubernetes job. + + :param spec: Parsed yaml job spec configuration + :return:V1JobSpec + """ + ttl_seconds_after_finished = spec["ttlSecondsAfterFinished"] + template = create_pod_template(spec["template"]) + job_spec_object = client.V1JobSpec(ttl_seconds_after_finished=ttl_seconds_after_finished, + template=template) + return job_spec_object + + +def create_job_metadata(metadata: dict): + """ + Create metadata section of a kubernetes job. + + :param metadata: Parsed yaml job metadata configuration + :return: V1ObjectMeta + """ + name = metadata["name"] + labels = {} + for key, value in metadata["labels"].items(): + labels[key] = value + metadata_object = client.V1ObjectMeta(name=name, + labels=labels) + return metadata_object + + +def create_job_object(config_file: dict): + """ + Create job object based on provided configuration file + + :param config_file: Parsed yaml job configuration file + :return: V1Job + """ + metadata = create_job_metadata(config_file["metadata"]) + spec = create_job_spec(config_file["spec"]) + job = client.V1Job( + api_version="batch/v1", + kind="Job", + metadata=metadata, + spec=spec) + + return job + +def create_job(api_instance, job, namespace): + """ + Create new job in kubernetes namespace + """ + if api_instance is None or job is None: + logger.debug("Api instance and job must not be None") + else: + api_response = api_instance.create_namespaced_job( + body=job, + namespace=namespace) + logger.info(f"Job created. 
status='{str(api_response.status)}'") \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/apply_changes/routes.py b/backend/SC4SNMP_UI_backend/apply_changes/routes.py new file mode 100644 index 0000000..c6089f4 --- /dev/null +++ b/backend/SC4SNMP_UI_backend/apply_changes/routes.py @@ -0,0 +1,22 @@ +from flask import Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend.apply_changes.apply_changes import ApplyChanges +import os + +apply_changes_blueprint = Blueprint('common_blueprint', __name__) +JOB_CREATION_RETRIES = int(os.getenv("JOB_CREATION_RETRIES", 10)) + +@apply_changes_blueprint.route("/apply-changes", methods=['POST']) +@cross_origin() +def apply_changes(): + changes = ApplyChanges() + job_delay, currently_scheduled = changes.apply_changes() + if job_delay <= 1 and currently_scheduled: + message = "There might be a previous kubernetes job still present in the namespace. Configuration update will be " \ + f"retried {JOB_CREATION_RETRIES} times. If your configuration isn't updated in a few minutes, make sure that " \ + f"the snmp-splunk-connect-for-snmp-inventory job isn't present in your kubernetes deployment namespace and " \ + f"click the 'Apply changes' button once again." + else: + message = f"Configuration will be updated in approximately {job_delay} seconds." + result = jsonify({"message": message}) + return result, 200 \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/apply_changes/tasks.py b/backend/SC4SNMP_UI_backend/apply_changes/tasks.py new file mode 100644 index 0000000..2e5bfed --- /dev/null +++ b/backend/SC4SNMP_UI_backend/apply_changes/tasks.py @@ -0,0 +1,55 @@ +import time +from celery import shared_task +import datetime +from kubernetes import client, config +import yaml +from kubernetes.client import ApiException +from SC4SNMP_UI_backend.apply_changes.kubernetes_job import create_job_object, create_job +from pymongo import MongoClient +import os +from celery.utils.log import get_task_logger + +MONGO_URI = os.getenv("MONGO_URI") +JOB_NAMESPACE = os.getenv("JOB_NAMESPACE", "sc4snmp") +JOB_CREATION_RETRIES = int(os.getenv("JOB_CREATION_RETRIES", 10)) +JOB_CONFIG_PATH = os.getenv("JOB_CONFIG_PATH", "/config/job_config.yaml") +celery_logger = get_task_logger(__name__) + +@shared_task() +def run_job(): + job = None + batch_v1 = None + with open(JOB_CONFIG_PATH, encoding="utf-8") as file: + config_file = yaml.safe_load(file) + if config_file["apiVersion"] != "batch/v1": + raise ValueError("api version is different from batch/v1") + config.load_incluster_config() + batch_v1 = client.BatchV1Api() + job = create_job_object(config_file) + + with MongoClient(MONGO_URI) as connection: + try_creating = True + iteration = 0 + while try_creating and iteration < JOB_CREATION_RETRIES: + # Try creating a new job. If the previous job is still present in the namespace, + # ApiException will be raised.
If that happens, wait for 10 seconds and try creating the job again + try: + create_job(batch_v1, job, JOB_NAMESPACE) + try_creating = False + try: + record = list(connection.sc4snmp.config_collection.find())[0] + connection.sc4snmp.config_collection.update_one({"_id": record["_id"]}, + {"$set": {"previous_job_start_time": datetime.datetime.utcnow(), + "currently_scheduled": False}}) + except Exception as e: + celery_logger.info(f"Error occurred while updating job state after job creation: {str(e)}") + except ApiException: + iteration += 1 + if iteration == JOB_CREATION_RETRIES: + try_creating = False + celery_logger.info(f"Kubernetes job was not created. Max retries ({JOB_CREATION_RETRIES}) exceeded.") + record = list(connection.sc4snmp.config_collection.find())[0] + connection.sc4snmp.config_collection.update_one({"_id": record["_id"]}, + {"$set": {"currently_scheduled": False}}) + else: + time.sleep(10) \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/common/backend_ui_conversions.py b/backend/SC4SNMP_UI_backend/common/backend_ui_conversions.py new file mode 100644 index 0000000..620ad70 --- /dev/null +++ b/backend/SC4SNMP_UI_backend/common/backend_ui_conversions.py @@ -0,0 +1,308 @@ +from abc import abstractmethod + + +def camel_case2snake_case(txt): + return ''.join(['_' + i.lower() if i.isupper() + else i for i in txt]).lstrip('_') + + +def snake_case2camel_case(txt): + result = [] + to_upper = False + for i in range(len(txt)): + if txt[i] != "_": + result.append(txt[i].upper()) if to_upper else result.append(txt[i]) + to_upper = False + elif txt[i] == "_" and i < len(txt) - 1: + to_upper = True + + return ''.join(result) + + +def get_group_or_profile_name_from_backend(document: dict): + group_or_profile_name = None + for key in document.keys(): + if key != "_id": + group_or_profile_name = key + return group_or_profile_name + + +class Conversion: + + @abstractmethod + def backend2ui(self, document: dict, **kwargs): + pass + + @abstractmethod + def ui2backend(self, document: dict, **kwargs): + pass + + +def string_value_to_numeric(value: str): + try: + if value.isnumeric(): + return int(value) + elif value.replace(".", "").isnumeric(): + return float(value) + else: + return value + except ValueError: + return value + + +class ProfileConversion(Conversion): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.__backend2ui_conditional_operations = { + "lt": "less than", + "gt": "greater than", + "equals": "equals", + "in": "in", + "regex": "regex" + } + self.__ui2backend_conditional_operations = {} + for key, value in self.__backend2ui_conditional_operations.items(): + self.__ui2backend_conditional_operations[value] = key + + self.__backend2ui_profile_types = { + "field": "smart", + "base": "base", + "walk": "walk" + } + self.__ui2backend_profile_types = {} + for key, value in self.__backend2ui_profile_types.items(): + self.__ui2backend_profile_types[value] = key + + def backend2ui(self, document: dict, **kwargs): + profile_name = get_group_or_profile_name_from_backend(document) + if "profile_in_inventory" not in kwargs.keys(): + raise ValueError("No profile_in_inventory provided") + elif profile_name is None: + raise ValueError("No profile name detected") + else: + profile_in_inventory = kwargs["profile_in_inventory"] + backend_var_binds = document[profile_name]["varBinds"] + var_binds = [] + for vb in backend_var_binds: + new_vb = { + "component": vb[0], + "object": vb[1] if len(vb) >= 2 else "", + "index": '.'.join(map(str, vb[2:])) if
len(vb) >= 3 else "", + } + var_binds.append(new_vb) + + if "condition" in document[profile_name]: + backend_condition = document[profile_name]["condition"] + condition_type = self.__backend2ui_profile_types[backend_condition["type"]] + field = backend_condition["field"] if backend_condition["type"] == "field" else "" + patterns = [{"pattern": p} for p in backend_condition["patterns"]] \ + if backend_condition["type"] == "field" else [] + conditions = { + "condition": condition_type, + "field": field, + "patterns": patterns, + "conditions": [] + } + elif "conditions" in document[profile_name]: + conditional = [] + for back_condition in document[profile_name]["conditions"]: + field = back_condition["field"] + operation = self.__backend2ui_conditional_operations[back_condition["operation"]] + negate_operation = back_condition.get("negate_operation", False) + value = [] + if operation == "in": + for v in back_condition["value"]: + value.append(str(v)) + else: + value.append(str(back_condition["value"])) + conditional.append( + {"field": field, "operation": operation, "value": value, "negateOperation": negate_operation} + ) + conditions = { + "condition": "conditional", + "field": "", + "patterns": [], + "conditions": conditional + } + else: + conditions = { + "condition": "standard", + "field": "", + "patterns": [], + "conditions": [] + } + + result = { + "_id": str(document["_id"]), + "profileName": profile_name, + "frequency": document[profile_name].get("frequency", 1), + "conditions": conditions, + "varBinds": var_binds, + "profileInInventory": profile_in_inventory + } + return result + + def ui2backend(self, document: dict, **kwargs): + conditions = None + condition = None + if document['conditions']['condition'] == "smart": + condition = { + 'type': 'field', + 'field': document['conditions']['field'], + 'patterns': [el['pattern'] for el in document['conditions']['patterns']] + } + elif document['conditions']['condition'] == "conditional": + conditions = [] + for ui_condition in document['conditions']['conditions']: + field = ui_condition["field"] + operation = self.__ui2backend_conditional_operations[ui_condition["operation"]] + if operation == "in": + value = [] + for v in ui_condition["value"]: + value.append(string_value_to_numeric(v)) + else: + value = string_value_to_numeric(ui_condition["value"][0]) + if ui_condition["negateOperation"]: + conditions.append( + {"field": field, "operation": operation, "value": value, "negate_operation": True} + ) + else: + conditions.append( + {"field": field, "operation": operation, "value": value} + ) + elif document['conditions']['condition'] != "standard": + condition = { + 'type': document['conditions']['condition'] + } + var_binds = [] + for var_b in document['varBinds']: + single_var_bind = [var_b['component']] + if len(var_b['object']) > 0: + single_var_bind.append(var_b['object']) + if len(var_b['index']) > 0: + single_var_bind += var_b['index'].split(".") + var_binds.append(single_var_bind) + + item = { + document['profileName']: { + 'varBinds': var_binds + } + } + if document['conditions']['condition'] != "walk": + item[document['profileName']].update({'frequency': int(document['frequency'])}) + if condition is not None: + item[document['profileName']].update({'condition': condition}) + if conditions is not None: + item[document['profileName']].update({'conditions': conditions}) + return item + + +class GroupConversion(Conversion): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def backend2ui(self, document: 
dict, **kwargs): + if "group_in_inventory" in kwargs.keys(): + group_name = get_group_or_profile_name_from_backend(document) + result = { + "_id": str(document["_id"]), + "groupName": group_name, + "groupInInventory": kwargs["group_in_inventory"] + } + return result + else: + raise ValueError("No group_in_inventory provided") + + def ui2backend(self, document: dict, **kwargs): + result = { + document["groupName"]: [] + } + return result + + +class GroupDeviceConversion(Conversion): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.optional_fields = ["port", "version", "community", "secret", "security_engine"] + + def backend2ui(self, document: dict, **kwargs): + if "group_id" in kwargs.keys() and "device_id" in kwargs.keys(): + group_id = kwargs["group_id"] + device_id = kwargs["device_id"] + result = { + "_id": f"{group_id}-{device_id}", + "groupId": str(group_id), + "address": document['address'] + } + for backend_key in self.optional_fields: + ui_key = snake_case2camel_case(backend_key) + if backend_key in document.keys(): + result.update({f'{ui_key}': str(document[backend_key])}) + else: + result.update({f'{ui_key}': ""}) + return result + else: + raise ValueError("No group_id or device_id provided") + + def ui2backend(self, document: dict, **kwargs): + result = { + "address": document["address"] + } + for backend_key in self.optional_fields: + ui_key = snake_case2camel_case(backend_key) + if len(document[ui_key]) > 0: + result.update({f"{backend_key}": str(document[ui_key])}) + if len(document['port']) > 0: + result.update({"port": int(document['port'])}) + return result + + +class InventoryConversion(Conversion): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def ui2backend(self, document: dict, **kwargs): + if "delete" in kwargs.keys(): + profiles = "" + for i in range(len(document['profiles'])): + profiles += f"{document['profiles'][i]}" + if i < len(document['profiles'])-1: + profiles += ";" + result = { + 'address': document['address'], + 'port': int(document['port']), + 'version': document['version'], + 'community': document['community'], + 'secret': document['secret'], + 'security_engine': document['securityEngine'], + 'walk_interval': document['walkInterval'], + 'profiles': profiles, + 'smart_profiles': document['smartProfiles'], + 'delete': kwargs['delete'] + } + return result + else: + raise ValueError("No delete provided") + + def backend2ui(self, document: dict, **kwargs): + if "inventory_type" not in kwargs.keys(): + raise ValueError("No inventory_type provided") + profiles_mongo = document['profiles'] + if len(profiles_mongo) > 0: + profiles = profiles_mongo.split(";") + else: + profiles = [] + result = { + '_id': str(document["_id"]), + 'inventoryType': kwargs['inventory_type'], + 'address': document['address'], + 'port': str(document['port']), + 'version': document['version'], + 'community': document['community'], + 'secret': document['secret'], + 'securityEngine': document['security_engine'], + 'walkInterval': document['walk_interval'], + 'profiles': profiles, + 'smartProfiles': document['smart_profiles'] + } + return result \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/common/conversions.py b/backend/SC4SNMP_UI_backend/common/conversions.py deleted file mode 100644 index d17dc13..0000000 --- a/backend/SC4SNMP_UI_backend/common/conversions.py +++ /dev/null @@ -1,226 +0,0 @@ -from abc import abstractmethod - - -def camel_case2snake_case(txt): - return ''.join(['_' + i.lower() if 
i.isupper() - else i for i in txt]).lstrip('_') - - -def snake_case2camel_case(txt): - result = [] - to_upper = False - for i in range(len(txt)): - if txt[i] != "_": - result.append(txt[i].upper()) if to_upper else result.append(txt[i]) - to_upper = False - elif txt[i] == "_" and i < len(txt) - 1: - to_upper = True - continue - - return ''.join(result) - - -def get_group_name_from_backend(document: dict): - group_name = None - for key in document.keys(): - if key != "_id": - group_name = key - return group_name - - -class Conversion: - @abstractmethod - def _ui2backend_map(self, document: dict, **kwargs): - pass - - @abstractmethod - def _backend2ui_map(self, document: dict, **kwargs): - pass - - def backend2ui(self, document: dict, **kwargs): - return self._backend2ui_map(document, **kwargs) - - def ui2backend(self, document: dict, **kwargs): - return self._ui2backend_map(document, **kwargs) - - -class ProfileConversion(Conversion): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def _backend2ui_map(self, document: dict, **kwargs): - profile_name = None - for key in document.keys(): - if key != "_id": - profile_name = key - if profile_name is None: - raise ValueError("No profile name detected") - else: - backend_var_binds = document[profile_name]["varBinds"] - var_binds = [] - for vb in backend_var_binds: - new_vb = { - "family": vb[0], - "category": vb[1] if len(vb) >= 2 else "", - "index": str(vb[2]) if len(vb) == 3 else "", - } - var_binds.append(new_vb) - - if "condition" in document[profile_name]: - backend_condition = document[profile_name]["condition"] - condition_type = backend_condition["type"] - field = backend_condition["field"] if condition_type == "field" else "" - patterns = [{"pattern": p} for p in backend_condition["patterns"]] \ - if condition_type == "field" else None - conditions = { - "condition": condition_type, - "field": field, - "patterns": patterns - } - else: - conditions = { - "condition": "None", - "field": "", - "patterns": None - } - result = { - "_id": str(document["_id"]), - "profileName": profile_name, - "frequency": document[profile_name]["frequency"], - "conditions": conditions, - "varBinds": var_binds - } - return result - - def _ui2backend_map(self, document: dict, **kwargs): - if document['conditions']['condition'] == "field": - conditions = { - 'type': 'field', - 'field': document['conditions']['field'], - 'patterns': [el['pattern'] for el in document['conditions']['patterns']] - } - elif document['conditions']['condition'] == "None": - conditions = None - else: - conditions = { - 'type': document['conditions']['condition'] - } - var_binds = [] - for var_b in document['varBinds']: - single_var_bind = [var_b['family']] - if len(var_b['category']) > 0: - single_var_bind.append(var_b['category']) - if len(var_b['index']) > 0: - single_var_bind.append(int(var_b['index'])) - var_binds.append(single_var_bind) - - item = { - document['profileName']: { - 'frequency': int(document['frequency']), - 'varBinds': var_binds - } - } - if conditions is not None: - item[document['profileName']].update({'condition': conditions}) - return item - - -class GroupConversion(Conversion): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def _backend2ui_map(self, document: dict, **kwargs): - group_name = get_group_name_from_backend(document) - result = { - "_id": str(document["_id"]), - "groupName": group_name - } - return result - - def _ui2backend_map(self, document: dict, **kwargs): - result = { - 
document["groupName"]: [] - } - return result - - -class GroupDeviceConversion(Conversion): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.optional_fields = ["port", "version", "community", "secret", "security_engine"] - - def _backend2ui_map(self, document: dict, **kwargs): - if "group_id" in kwargs.keys() and "device_id" in kwargs.keys(): - group_id = kwargs["group_id"] - device_id = kwargs["device_id"] - result = { - "_id": f"{group_id}-{device_id}", - "groupId": str(group_id), - "address": document['address'] - } - for backend_key in self.optional_fields: - ui_key = snake_case2camel_case(backend_key) - if backend_key in document.keys(): - result.update({f'{ui_key}': str(document[backend_key])}) - else: - result.update({f'{ui_key}': ""}) - return result - else: - raise ValueError("No group_id or device_id provided") - - def _ui2backend_map(self, document: dict, **kwargs): - result = { - "address": document["address"] - } - for backend_key in self.optional_fields: - ui_key = snake_case2camel_case(backend_key) - if len(document[ui_key]) > 0: - result.update({f"{backend_key}": str(document[ui_key])}) - if len(document['port']) > 0: - result.update({"port": int(document['port'])}) - return result - - -class InventoryConversion(Conversion): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def _ui2backend_map(self, document: dict, **kwargs): - if "delete" in kwargs.keys(): - profiles = "" - for i in range(len(document['profiles'])): - profiles += f"{document['profiles'][i]}" - if i < len(document['profiles'])-1: - profiles += ";" - result = { - 'address': document['address'], - 'port': int(document['port']), - 'version': document['version'], - 'community': document['community'], - 'secret': document['secret'], - 'security_engine': document['securityEngine'], - 'walk_interval': document['walkInterval'], - 'profiles': profiles, - 'smart_profiles': document['smartProfiles'], - 'delete': kwargs['delete'] - } - return result - else: - raise ValueError("No delete provided") - - def _backend2ui_map(self, document: dict, **kwargs): - profiles_mongo = document['profiles'] - profiles = profiles_mongo.split(";") - result = { - '_id': str(document["_id"]), - 'address': document['address'], - 'port': str(document['port']), - 'version': document['version'], - 'community': document['community'], - 'secret': document['secret'], - 'securityEngine': document['security_engine'], - 'walkInterval': document['walk_interval'], - 'profiles': profiles, - 'smartProfiles': document['smart_profiles'] - } - return result diff --git a/backend/SC4SNMP_UI_backend/common/inventory_utils.py b/backend/SC4SNMP_UI_backend/common/inventory_utils.py new file mode 100644 index 0000000..853a234 --- /dev/null +++ b/backend/SC4SNMP_UI_backend/common/inventory_utils.py @@ -0,0 +1,266 @@ +from SC4SNMP_UI_backend import mongo_client +from enum import Enum +from typing import Callable +from bson import ObjectId +from SC4SNMP_UI_backend.common.backend_ui_conversions import InventoryConversion + +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui +inventory_conversion = InventoryConversion() + +class HostConfiguration(Enum): + SINGLE = 1 + GROUP = 2 + +def get_inventory_type(document): + if list(mongo_groups.find({document["address"]: {"$exists": 1}})): + result = "Group" + else: + result = "Host" + return result + +def update_profiles_in_inventory(profile_to_search: str, process_record: Callable, **kwargs): + """ + When profile 
is edited, then in some cases inventory records using this profile should be updated. + + :param profile_to_search: name of the profile which should be updated in the inventory + :param process_record: function to process profiles in record. It should accept index of the profile to update, + the whole record dictionary and kwargs passed by user. + :param kwargs: additional variables which user can pass to process_record function + :return: + """ + inventory_records = list(mongo_inventory.find({"profiles": {"$regex": f'.*{profile_to_search}.*'}, "delete": False})) + for record in inventory_records: + record_id = record["_id"] + record_updated = inventory_conversion.backend2ui(record, inventory_type=None) # inventory_type isn't used + index_to_update = record_updated["profiles"].index(profile_to_search) + record_updated = process_record(index_to_update, record_updated, kwargs) + record_updated = inventory_conversion.ui2backend(record_updated, delete=False) + mongo_inventory.update_one({"_id": ObjectId(record_id)}, {"$set": record_updated}) + return inventory_records + + +class HandleNewDevice: + def __init__(self, mongo_groups, mongo_inventory): + self._mongo_groups = mongo_groups + self._mongo_inventory = mongo_inventory + + def _is_host_in_group(self, address, port) -> (bool, str, str): + groups_from_inventory = list(self._mongo_inventory.find({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False})) + break_occurred = False + + host_in_group = False + group_id = None + device_id = None + group_name = None + + for group_config in groups_from_inventory: + group_config_name = group_config["address"] + group_name = group_config_name + group_port = group_config["port"] + group = list(self._mongo_groups.find({group_config_name: {"$exists": 1}})) + if len(group) > 0: + group = group[0] + for i, device in enumerate(group[group_config_name]): + device_port = device.get("port", group_port) + if device["address"] == address and int(device_port) == int(port): + host_in_group = True + group_id = str(group["_id"]) + device_id = i + break_occurred = True + break + if break_occurred: + break + + return host_in_group, group_id, device_id, group_name + + def _is_host_configured(self, address: str, port: str): + existing_inventory_record = list(self._mongo_inventory.find({'address': address, 'port': int(port), "delete": False})) + deleted_inventory_record = list(self._mongo_inventory.find({'address': address, 'port': int(port), "delete": True})) + + host_configured = False + host_configuration = None + existing_id_string = None + group_name = None + + if len(existing_inventory_record) > 0: + host_configured = True + host_configuration = HostConfiguration.SINGLE + existing_id_string = str(existing_inventory_record[0]["_id"]) + else: + host_in_group, group_id, device_id, group_name = self._is_host_in_group(address, port) + if host_in_group: + host_configured = True + host_configuration = HostConfiguration.GROUP + existing_id_string = f"{group_id}-{device_id}" + + return host_configured, deleted_inventory_record, host_configuration, existing_id_string, group_name + + def add_single_host(self, address, port, device_object=None, add: bool=True): + host_configured, deleted_inventory_record, host_configuration, existing_id_string, group_name = \ + self._is_host_configured(address, port) + groups = list(mongo_groups.find({address: {"$exists": True}})) + if host_configured: + host_location_message = "in the inventory" if host_configuration == HostConfiguration.SINGLE else \ + f"in group {group_name}" + message = 
f"Host {address}:{port} already exists {host_location_message}. Record was not added." + host_added = False + elif groups: + message = f"There is a group with the same name configured. Record {address} can't be added as a single host." + host_added = False + else: + if add and device_object is not None: + self._mongo_inventory.insert_one(device_object) + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + message = None + host_added = True + return host_added, message + + def edit_single_host(self, address: str, port: str, host_id: str, device_object=None, edit: bool=True): + host_configured, deleted_inventory_record, host_configuration, existing_id_string, group_name =\ + self._is_host_configured(address, port) + + if not host_configured or (host_configured and host_id == existing_id_string): + message = "success" + host_edited = True + if edit and device_object is not None: + host_id = ObjectId(host_id) + previous_device_object = list(self._mongo_inventory.find({"_id": host_id}))[0] + if int(port) != int(previous_device_object["port"]) or address != previous_device_object["address"]: + host_added, add_message = self.add_single_host(address, port, device_object, True) + if not host_added: + host_edited = False + message = add_message + else: + self._mongo_inventory.update_one({"_id": ObjectId(host_id)}, {"$set": {"delete": True}}) + message = "Address or port was edited which resulted in deleting the old device and creating " \ + "the new one at the end of the list." + else: + self._mongo_inventory.update_one({"_id": host_id}, {"$set": device_object}) + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + else: + host_location_message = "in the inventory" if host_configuration == HostConfiguration.SINGLE else \ + f"in group {group_name}" + message = f"Host {address}:{port} already exists {host_location_message}. Record was not edited." + host_edited = False + return host_edited, message + + def add_group_host(self, group_name: str, group_id: ObjectId, device_object: dict): + group_from_inventory = list(self._mongo_inventory.find({"address": group_name, "delete": False})) + group = list(self._mongo_groups.find({"_id": group_id}, {"_id": 0})) + group = group[0] + address = device_object["address"] + port = str(device_object.get("port", "")) + if len(group_from_inventory) > 0: + device_port = port if len(port)>0 else str(group_from_inventory[0]["port"]) + host_added, message = self.add_single_host(address, device_port, add=False) + else: + new_device_port = int(port) if len(port) > 0 else -1 + host_added = True + message = None + for device in group[group_name]: + old_device_port = device.get('port', -1) + if device["address"] == address and old_device_port == new_device_port: + message = f"Host {address}:{port} already exists in group {group_name}. Record was not added." 
+ host_added = False + if host_added: + group[group_name].append(device_object) + new_values = {"$set": group} + self._mongo_groups.update_one({"_id": group_id}, new_values) + return host_added, message + + def edit_group_host(self, group_name: str, group_id: ObjectId, device_id: str, device_object: dict): + group_from_inventory = list(self._mongo_inventory.find({"address": group_name, "delete": False})) + group = list(self._mongo_groups.find({"_id": group_id})) + group = group[0] + address = device_object["address"] + port = str(device_object.get("port", "")) + if len(group_from_inventory) > 0: + device_port = port if len(port) > 0 else str(group_from_inventory[0]["port"]) + host_edited, message = self.edit_single_host(address, device_port, device_id, edit=False) + else: + new_device_port = int(port) if len(port) > 0 else -1 + host_edited = True + message = None + for i, device in enumerate(group[group_name]): + old_device_port = device.get('port', -1) + old_device_id = f"{i}" + if device["address"] == address and old_device_port == new_device_port and old_device_id != device_id: + message = f"Host {address}:{port} already exists in group {group_name}. Record was not edited." + host_edited = False + if host_edited: + group[group_name][int(device_id)] = device_object + new_values = {"$set": group} + mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) + return host_edited, message + + def add_group_to_inventory(self, group_name: str, group_port: str, group_object=None, add: bool = True): + group_added = True + message = None + existing_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": False})) + deleted_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": True})) + group = list(self._mongo_groups.find({group_name: {"$exists": 1}})) + if len(group) == 0: + group_added = False + message = f"Group {group_name} doesn't exist in the configuration. Record was not added." + elif len(existing_inventory_record) > 0: + group_added = False + message = f"Group {group_name} has already been added to the inventory. Record was not added." + else: + group = group[0] + devices_in_group = dict() + for i, device in enumerate(group[group_name]): + device_port = str(device.get("port", group_port)) + address = device["address"] + device_added, message = self.add_single_host(address, device_port, add=False) + if not device_added: + group_added = False + message = f"Can't add group {group_name}. {message}" + break + else: + if f"{address}:{device_port}" in devices_in_group: + message = f"Can't add group {group_name}. Device {address}:{device_port} was configured multiple times in this group. Record was not added." 
+ group_added = False + break + else: + devices_in_group[f"{address}:{device_port}"] = 1 + + if group_added and add and group_object is not None: + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + self._mongo_inventory.insert_one(group_object) + return group_added, message + + def edit_group_in_inventory(self, group_name: str, group_id: str, group_object=None, edit: bool = True): + group_id = ObjectId(group_id) + existing_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": False})) + deleted_inventory_record = list(self._mongo_inventory.find({'address': group_name, "delete": True})) + group = list(self._mongo_groups.find({group_name: {"$exists": 1}})) + if len(group) == 0: + group_edited = False + message = f"Group {group_name} doesn't exist in the configuration. Record was not edited." + elif len(existing_inventory_record) == 0 or (len(existing_inventory_record) > 0 and existing_inventory_record[0]["_id"] == group_id): + message = "success" + group_edited = True + if edit and group_object is not None: + previous_group_object = list(self._mongo_inventory.find({"_id": group_id}))[0] + if group_name != previous_group_object["address"]: + group_added, add_message = self.add_group_to_inventory(group_name, str(group_object["port"]), group_object, True) + if not group_added: + group_edited = False + message = add_message + else: + self._mongo_inventory.update_one({"_id": group_id}, {"$set": {"delete": True}}) + message = "Group name was edited which resulted in deleting the old group and creating a new " \ + "one at the end of the list." + else: + self._mongo_inventory.update_one({"_id": group_id}, {"$set": group_object}) + if len(deleted_inventory_record) > 0: + self._mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) + else: + message = f"Group with name {group_name} already exists. Record was not edited." + group_edited = False + + return group_edited, message
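Every HandleNewDevice method returns a (success, message) pair, and the route modules below turn that pair into an HTTP response, treating a message that accompanies success as a note worth surfacing to the user. A minimal sketch of that calling convention; respond is a hypothetical helper, the real routes inline this logic:

from flask import jsonify

def respond(succeeded: bool, message):
    # Success with a message: the operation went through but the user should read the note.
    if succeeded and message is not None:
        return jsonify({"message": message}), 200
    # Plain success.
    if succeeded:
        return jsonify("success"), 200
    # Validation failure: the record was rejected.
    return jsonify({"message": message}), 400

# Usage inside a view, assuming the collections configured in the route modules:
# handler = HandleNewDevice(mongo_groups, mongo_inventory)
# return respond(*handler.add_single_host("10.0.0.1", "161", device_object, True))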
diff --git a/backend/SC4SNMP_UI_backend/groups/__init__.py b/backend/SC4SNMP_UI_backend/groups/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/SC4SNMP_UI_backend/groups/routes.py b/backend/SC4SNMP_UI_backend/groups/routes.py new file mode 100644 index 0000000..cf27d5d --- /dev/null +++ b/backend/SC4SNMP_UI_backend/groups/routes.py @@ -0,0 +1,175 @@ +from bson import ObjectId +from flask import request, Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.common.backend_ui_conversions import GroupConversion, GroupDeviceConversion, InventoryConversion, \ + get_group_or_profile_name_from_backend +from copy import copy +from SC4SNMP_UI_backend.common.inventory_utils import HandleNewDevice, get_inventory_type + +groups_blueprint = Blueprint('groups_blueprint', __name__) + +group_conversion = GroupConversion() +group_device_conversion = GroupDeviceConversion() +inventory_conversion = InventoryConversion() +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui + +@groups_blueprint.route('/groups') +@cross_origin() +def get_groups_list(): + groups = mongo_groups.find() + groups_list = [] + for gr in list(groups): + group_name = get_group_or_profile_name_from_backend(gr) + group_in_inventory = True if list(mongo_inventory.find({"address": group_name, "delete": False})) else False + groups_list.append(group_conversion.backend2ui(gr, group_in_inventory=group_in_inventory)) + return jsonify(groups_list) + + +@groups_blueprint.route('/groups/add', methods=['POST']) +@cross_origin() +def add_group_record(): + group_obj = request.json + same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) + if len(same_name_groups) > 0: + result = jsonify( + {"message": f"Group with name {group_obj['groupName']} already exists. Group was not added."}), 400 + elif list(mongo_inventory.find({"address": group_obj['groupName'], "delete": False})): + result = jsonify( + {"message": f"In the inventory there is a record with name {group_obj['groupName']}. Group was not added."} + ), 400 + else: + group_obj = group_conversion.ui2backend(group_obj) + mongo_groups.insert_one(group_obj) + result = jsonify("success") + return result + + +@groups_blueprint.route('/groups/update/<group_id>', methods=['POST']) +@cross_origin() +def update_group(group_id): + group_obj = request.json + same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) + if len(same_name_groups) > 0: + result = jsonify( + {"message": f"Group with name {group_obj['groupName']} already exists. Group was not edited."}), 400 + elif list(mongo_inventory.find({"address": group_obj['groupName'], "delete": False})): + result = jsonify( + {"message": f"In the inventory there is a record with name {group_obj['groupName']}. 
Group was not edited."} + ), 400 + else: + old_group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] + old_group_name = get_group_or_profile_name_from_backend(old_group) + mongo_groups.update_one({'_id': old_group['_id']}, {"$rename": {f"{old_group_name}": f"{group_obj['groupName']}"}}) + + # Rename corresponding group in the inventory + mongo_inventory.update_one({"address": old_group_name}, {"$set": {"address": group_obj['groupName']}}) + result = jsonify({"message": f"{old_group_name} was also renamed to {group_obj['groupName']} in the inventory"}), 200 + return result + + +@groups_blueprint.route('/groups/delete/', methods=['POST']) +@cross_origin() +def delete_group_and_devices(group_id): + group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] + group_name = get_group_or_profile_name_from_backend(group) + configured_in_inventory = False + with mongo_client.start_session() as session: + with session.start_transaction(): + mongo_groups.delete_one({'_id': ObjectId(group_id)}) + if list(mongo_inventory.find({"address": group_name})): + configured_in_inventory = True + mongo_inventory.update_one({"address": group_name}, {"$set": {"delete": True}}) + if configured_in_inventory: + message = f"Group {group_name} was deleted. It was also deleted from the inventory." + else: + message = f"Group {group_name} was deleted." + return jsonify({"message": message}), 200 + + +@groups_blueprint.route('/group//devices/count') +@cross_origin() +def get_devices_count_for_group(group_id): + group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] + group_name = get_group_or_profile_name_from_backend(group) + total_count = len(group[group_name]) + return jsonify(total_count) + + +@groups_blueprint.route('/group//devices//') +@cross_origin() +def get_devices_of_group(group_id, page_num, dev_per_page): + page_num = int(page_num) + dev_per_page = int(dev_per_page) + skips = dev_per_page * (page_num - 1) + group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] + + group_name = get_group_or_profile_name_from_backend(group) + devices_list = [] + for i, device in enumerate(group[group_name]): + devices_list.append(group_device_conversion.backend2ui(device, group_id=group_id, device_id=copy(i))) + devices_list = devices_list[skips:skips+dev_per_page] + return jsonify(devices_list) + + +@groups_blueprint.route('/group/inventory/') +@cross_origin() +def get_group_config_from_inventory(group_name): + group_from_inventory = list(mongo_inventory.find({"address": group_name, "delete": False})) + if len(group_from_inventory) > 0: + inventory_type = get_inventory_type(group_from_inventory[0]) + result = jsonify(inventory_conversion.backend2ui(group_from_inventory[0], inventory_type=inventory_type)), 200 + else: + result = "", 204 + return result + + +@groups_blueprint.route('/devices/add', methods=['POST']) +@cross_origin() +def add_device_to_group(): + device_obj = request.json + group_id = device_obj["groupId"] + group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] + group_name = get_group_or_profile_name_from_backend(group) + device_obj = group_device_conversion.ui2backend(device_obj) + handler = HandleNewDevice(mongo_groups, mongo_inventory) + host_added, message = handler.add_group_host(group_name, ObjectId(group_id), device_obj) + if host_added: + result = jsonify("success"), 200 + else: + result = jsonify({"message": message}), 400 + return result + + +@groups_blueprint.route('/devices/update/', methods=['POST']) +@cross_origin() +def 
diff --git a/backend/SC4SNMP_UI_backend/inventory/__init__.py b/backend/SC4SNMP_UI_backend/inventory/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/SC4SNMP_UI_backend/inventory/routes.py b/backend/SC4SNMP_UI_backend/inventory/routes.py new file mode 100644 index 0000000..6966019 --- /dev/null +++ b/backend/SC4SNMP_UI_backend/inventory/routes.py @@ -0,0 +1,93 @@ +from bson import ObjectId +from flask import request, Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.common.backend_ui_conversions import InventoryConversion +from SC4SNMP_UI_backend.common.inventory_utils import HandleNewDevice, get_inventory_type + +inventory_blueprint = Blueprint('inventory_blueprint', __name__) + +inventory_conversion = InventoryConversion() +mongo_groups = mongo_client.sc4snmp.groups_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui + +@inventory_blueprint.route('/inventory/<page_num>/<dev_per_page>') +@cross_origin() +def get_inventory_list(page_num, dev_per_page): + page_num = int(page_num) + dev_per_page = int(dev_per_page) + skips = dev_per_page * (page_num - 1) + + inventory = list(mongo_inventory.find({"delete": False}).skip(skips).limit(dev_per_page)) + inventory_list = [] + for inv in inventory: + inventory_type = get_inventory_type(inv) + inventory_list.append(inventory_conversion.backend2ui(inv, inventory_type=inventory_type)) + return jsonify(inventory_list) + + +@inventory_blueprint.route('/inventory/count') +@cross_origin() +def get_inventory_count(): + total_count = mongo_inventory.count_documents({"delete": False}) + return jsonify(total_count) + + +@inventory_blueprint.route('/inventory/add', methods=['POST']) +@cross_origin() +def add_inventory_record(): + inventory_obj = request.json + inventory_type = inventory_obj["inventoryType"] + inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) + handler = HandleNewDevice(mongo_groups, mongo_inventory) + if inventory_type == "Host": + record_added, message = handler.add_single_host(inventory_obj["address"], str(inventory_obj["port"]), + inventory_obj, True) + else: + record_added, message = 
handler.add_group_to_inventory(inventory_obj["address"], str(inventory_obj["port"]), + inventory_obj, True) + if record_added and message is not None: + result = jsonify({"message": message}), 200 + elif record_added: + result = jsonify("success"), 200 + else: + result = jsonify({"message": message}), 400 + return result + + +@inventory_blueprint.route('/inventory/delete/', methods=['POST']) +@cross_origin() +def delete_inventory_record(inventory_id): + mongo_inventory.update_one({"_id": ObjectId(inventory_id)}, {"$set": {"delete": True}}) + inventory_item = list(mongo_inventory.find({"_id": ObjectId(inventory_id)}))[0] + address = inventory_item['address'] + port = f":{inventory_item['port']}" if address[0].isnumeric() else "" + return jsonify({"message": f"{address}{port} was deleted."}), 200 + + +@inventory_blueprint.route('/inventory/update/', methods=['POST']) +@cross_origin() +def update_inventory_record(inventory_id): + inventory_obj = request.json + inventory_type = inventory_obj["inventoryType"] + inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) + current_inventory = list(mongo_inventory.find({"_id": ObjectId(inventory_id)}))[0] + current_inventory_type = get_inventory_type(current_inventory) + handler = HandleNewDevice(mongo_groups, mongo_inventory) + + if inventory_type != current_inventory_type: + result = jsonify({"message": "Can't edit single host to the group or group to the single host"}), 400 + else: + if inventory_type == "Host": + record_edited, message = handler.edit_single_host(inventory_obj["address"], str(inventory_obj["port"]), + str(inventory_id), inventory_obj, True) + else: + record_edited, message = handler.edit_group_in_inventory(inventory_obj["address"], str(inventory_id), inventory_obj, True) + if record_edited: + if message == "success" or message is None: + result = jsonify("success"), 200 + else: + result = jsonify({"message": message}), 200 + else: + result = jsonify({"message": message}), 400 + return result diff --git a/backend/SC4SNMP_UI_backend/profiles/__init__.py b/backend/SC4SNMP_UI_backend/profiles/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/SC4SNMP_UI_backend/profiles/routes.py b/backend/SC4SNMP_UI_backend/profiles/routes.py new file mode 100644 index 0000000..cdc0e4c --- /dev/null +++ b/backend/SC4SNMP_UI_backend/profiles/routes.py @@ -0,0 +1,130 @@ +from bson import ObjectId +from flask import request, Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.common.backend_ui_conversions import ProfileConversion, get_group_or_profile_name_from_backend +from SC4SNMP_UI_backend.common.inventory_utils import update_profiles_in_inventory + +profiles_blueprint = Blueprint('profiles_blueprint', __name__) + +profile_conversion = ProfileConversion() +mongo_profiles = mongo_client.sc4snmp.profiles_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui + +# @cross_origin(origins='*', headers=['access-control-allow-origin', 'Content-Type']) +@profiles_blueprint.route('/profiles/names') +@cross_origin() +def get_profile_names(): + profiles = list(mongo_profiles.find()) + profiles_list = [] + for pr in profiles: + converted = profile_conversion.backend2ui(pr, profile_in_inventory=True) + if converted['conditions']['condition'] not in ['mandatory', 'base']: + profiles_list.append(converted) + return jsonify([el["profileName"] for el in profiles_list]) + +@profiles_blueprint.route('/profiles/count') +@cross_origin() +def 
diff --git a/backend/SC4SNMP_UI_backend/profiles/__init__.py b/backend/SC4SNMP_UI_backend/profiles/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/SC4SNMP_UI_backend/profiles/routes.py b/backend/SC4SNMP_UI_backend/profiles/routes.py new file mode 100644 index 0000000..cdc0e4c --- /dev/null +++ b/backend/SC4SNMP_UI_backend/profiles/routes.py @@ -0,0 +1,130 @@ +from bson import ObjectId +from flask import request, Blueprint, jsonify +from flask_cors import cross_origin +from SC4SNMP_UI_backend import mongo_client +from SC4SNMP_UI_backend.common.backend_ui_conversions import ProfileConversion, get_group_or_profile_name_from_backend +from SC4SNMP_UI_backend.common.inventory_utils import update_profiles_in_inventory + +profiles_blueprint = Blueprint('profiles_blueprint', __name__) + +profile_conversion = ProfileConversion() +mongo_profiles = mongo_client.sc4snmp.profiles_ui +mongo_inventory = mongo_client.sc4snmp.inventory_ui + +# @cross_origin(origins='*', headers=['access-control-allow-origin', 'Content-Type']) +@profiles_blueprint.route('/profiles/names') +@cross_origin() +def get_profile_names(): + profiles = list(mongo_profiles.find()) + profiles_list = [] + for pr in profiles: + converted = profile_conversion.backend2ui(pr, profile_in_inventory=True) + if converted['conditions']['condition'] not in ['mandatory', 'base']: + profiles_list.append(converted) + return jsonify([el["profileName"] for el in profiles_list]) + +@profiles_blueprint.route('/profiles/count') +@cross_origin() +def get_profiles_count(): + total_count = mongo_profiles.count_documents({}) + return jsonify(total_count) + +@profiles_blueprint.route('/profiles/<page_num>/<prof_per_page>') +@cross_origin() +def get_profiles_list(page_num, prof_per_page): + page_num = int(page_num) + prof_per_page = int(prof_per_page) + skips = prof_per_page * (page_num - 1) + + profiles = list(mongo_profiles.find().skip(skips).limit(prof_per_page)) + profiles_list = [] + for pr in profiles: + profile_name = get_group_or_profile_name_from_backend(pr) + profile_in_inventory = True if list(mongo_inventory.find({"profiles": {"$regex": f'.*{profile_name}.*'}, + "delete": False})) else False + converted = profile_conversion.backend2ui(pr, profile_in_inventory=profile_in_inventory) + if converted['conditions']['condition'] not in ['mandatory']: + profiles_list.append(converted) + return jsonify(profiles_list) + + +@profiles_blueprint.route('/profiles') +@cross_origin() +def get_all_profiles_list(): + profiles = list(mongo_profiles.find()) + profiles_list = [] + for pr in profiles: + converted = profile_conversion.backend2ui(pr, profile_in_inventory=True) + if converted['conditions']['condition'] not in ['mandatory']: + profiles_list.append(converted) + return jsonify(profiles_list) + + +@profiles_blueprint.route('/profiles/add', methods=['POST']) +@cross_origin() +def add_profile_record(): + profile_obj = request.json + same_name_profiles = list(mongo_profiles.find({f"{profile_obj['profileName']}": {"$exists": True}})) + if len(same_name_profiles) > 0: + result = jsonify( + {"message": f"Profile with name {profile_obj['profileName']} already exists. Profile was not added."}), 400 + else: + profile_obj = profile_conversion.ui2backend(profile_obj) + mongo_profiles.insert_one(profile_obj) + result = jsonify("success") + return result + +@profiles_blueprint.route('/profiles/delete/<profile_id>', methods=['POST']) +@cross_origin() +def delete_profile_record(profile_id): + profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] + profile_name = list(profile.keys())[0] + + # Find records from inventory where this profile was used. + def delete_profile(index, record_to_update, kwargs): + record_to_update["profiles"].pop(index) + return record_to_update + inventory_records = update_profiles_in_inventory(profile_name, delete_profile) + if inventory_records: + message = f"Profile {profile_name} was deleted. It was also deleted from some inventory records." + else: + message = f"Profile {profile_name} was deleted." + + mongo_profiles.delete_one({'_id': ObjectId(profile_id)}) + return jsonify({"message": message}), 200 + + +@profiles_blueprint.route('/profiles/update/<profile_id>', methods=['POST']) +@cross_origin() +def update_profile_record(profile_id): + profile_obj = request.json + new_profile_name = profile_obj['profileName'] + + same_name_profiles = list(mongo_profiles.find({f"{new_profile_name}": {"$exists": True}, "_id": {"$ne": ObjectId(profile_id)}})) + if len(same_name_profiles) > 0: + return jsonify( + {"message": f"Profile with name {new_profile_name} already exists. Profile was not edited."}), 400 + + profile_obj = profile_conversion.ui2backend(profile_obj) + + old_profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] + old_profile_name = list(old_profile.keys())[0] + + # If profile name was changed update it and also update all inventory records where this profile is used + if old_profile_name != new_profile_name: + mongo_profiles.update_one({'_id': ObjectId(profile_id)}, + {"$rename": {f"{old_profile_name}": f"{new_profile_name}"}}) + + def update_name(index, record_to_update, kwargs): + record_to_update["profiles"][index] = kwargs["new_name"] + return record_to_update + update_profiles_in_inventory(old_profile_name, update_name, new_name=new_profile_name) + + result = jsonify({"message": f"If {old_profile_name} was used in some records in the inventory," + f" it was updated to {new_profile_name}"}), 200 + else: + result = jsonify("success"), 200 + + mongo_profiles.update_one({'_id': ObjectId(profile_id)}, + {"$set": {new_profile_name: profile_obj[new_profile_name]}}) + return result \ No newline at end of file
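delete_profile_record and update_profile_record both lean on the callback protocol of update_profiles_in_inventory (imported above from common/inventory_utils and documented in the deleted helpers module below): the callback receives the index of the matched profile within the record, the UI-shaped record, and a dict of extra keyword arguments, and returns the modified record. A sketch of a custom callback under that protocol; the suffix operation is purely illustrative:

def tag_profile(index, record_to_update, kwargs):
    # Rename the matched profile in place, e.g. to mark it for staging.
    record_to_update["profiles"][index] = record_to_update["profiles"][index] + kwargs["suffix"]
    return record_to_update

# update_profiles_in_inventory("profile_1", tag_profile, suffix="_staging")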
Profile was not edited."}), 400 + + profile_obj = profile_conversion.ui2backend(profile_obj) + + old_profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] + old_profile_name = list(old_profile.keys())[0] + + # If profile name was changed update it and also update all inventory records where this profile is used + if old_profile_name != new_profile_name: + mongo_profiles.update_one({'_id': ObjectId(profile_id)}, + {"$rename": {f"{old_profile_name}": f"{new_profile_name}"}}) + + def update_name(index, record_to_update, kwargs): + record_to_update["profiles"][index] = kwargs["new_name"] + return record_to_update + update_profiles_in_inventory(old_profile_name, update_name, new_name=new_profile_name) + + result = jsonify({"message": f"If {old_profile_name} was used in some records in the inventory," + f" it was updated to {new_profile_name}"}), 200 + else: + result = jsonify("success"), 200 + + mongo_profiles.update_one({'_id': ObjectId(profile_id)}, + {"$set": {new_profile_name: profile_obj[new_profile_name]}}) + return result \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/ui_handling/helpers.py b/backend/SC4SNMP_UI_backend/ui_handling/helpers.py deleted file mode 100644 index cd6cb91..0000000 --- a/backend/SC4SNMP_UI_backend/ui_handling/helpers.py +++ /dev/null @@ -1,97 +0,0 @@ -from SC4SNMP_UI_backend import mongo_client -from enum import Enum -from typing import Callable -from bson import ObjectId -from flask import jsonify -from SC4SNMP_UI_backend.common.conversions import InventoryConversion - -mongo_groups = mongo_client.sc4snmp.groups_ui -mongo_inventory = mongo_client.sc4snmp.inventory_ui -inventory_conversion = InventoryConversion() - -class InventoryAddEdit(Enum): - ADD = 1 - EDIT = 2 - - -def check_if_inventory_can_be_added(inventory_obj, change_type: InventoryAddEdit, inventory_id): - """ - Before updating or adding new inventory check if it can be done. For example users shouldn't add new - inventory if the same inventory already exists. - - :param inventory_obj: new inventory object to be added/updated - :param change_type: InventoryAddEdit.EDIT or InventoryAddEdit.ADD - :param inventory_id: id of the inventory to be edited - :return: - """ - - address = inventory_obj['address'] - port = inventory_obj['port'] - message = "added" if change_type == InventoryAddEdit.ADD else "edited" - inventory_id = ObjectId(inventory_id) if change_type == InventoryAddEdit.EDIT else None - - check_duplicates = False - if address[0].isdigit(): - # record is a single host - existing_inventory_record = list(mongo_inventory.find({'address': address, 'port': port, "delete": False})) - - # check if there is any record for this device which has been assigned to be deleted - deleted_inventory_record = list(mongo_inventory.find({'address': address, 'port': port, "delete": True})) - identifier = f"{address}:{port}" - check_duplicates = True - else: - # record is a group - existing_inventory_record = list(mongo_inventory.find({'address': address, "delete": False})) - - # check if there is any record for this group which has been assigned to be deleted - deleted_inventory_record = list(mongo_inventory.find({'address': address, "delete": True})) - identifier = address - group = list(mongo_groups.find({address: {"$exists": 1}})) - if len(group) == 0: - result = jsonify({"message": f"There is no group {address} configured. 
Record was not {message}."}), 400 - else: - check_duplicates = True - - if check_duplicates: - # check if the same record already exist in the inventory - if len(existing_inventory_record) == 0: - make_change = True - elif existing_inventory_record[0]["_id"] == inventory_id and change_type == InventoryAddEdit.EDIT: - make_change = True - else: - make_change = False - - if make_change: - if change_type == InventoryAddEdit.ADD: - mongo_inventory.insert_one(inventory_obj) - else: - mongo_inventory.update_one({"_id": inventory_id}, {"$set": inventory_obj}) - - if len(deleted_inventory_record) > 0: - mongo_inventory.delete_one({"_id": deleted_inventory_record[0]["_id"]}) - result = jsonify("success"), 200 - else: - result = jsonify( - {"message": f"Inventory record for {identifier} already exists. Record was not {message}."}), 400 - - return result - - -def update_profiles_in_inventory(profile_to_search: str, process_record: Callable, **kwargs): - """ - When profile is edited, then in some cases inventory records using this profile should be updated. - - :param profile_to_search: name of the profile which should be updated in the inventory - :param process_record: function to process profiles in record. It should accept index of profile to update, - whole record dictionary and kwargs passed by user. - :param kwargs: additional variables which user can pass to process_record function - :return: - """ - inventory_records = list(mongo_inventory.find({"profiles": {"$regex": f'.*{profile_to_search}.*'}})) - for record in inventory_records: - record_id = record["_id"] - record_updated = inventory_conversion.backend2ui(record) - index_to_update = record_updated["profiles"].index(profile_to_search) - record_updated = process_record(index_to_update, record_updated, kwargs) - record_updated = inventory_conversion.ui2backend(record_updated, delete=False) - mongo_inventory.update_one({"_id": ObjectId(record_id)}, {"$set": record_updated}) \ No newline at end of file diff --git a/backend/SC4SNMP_UI_backend/ui_handling/routes.py b/backend/SC4SNMP_UI_backend/ui_handling/routes.py deleted file mode 100644 index de23194..0000000 --- a/backend/SC4SNMP_UI_backend/ui_handling/routes.py +++ /dev/null @@ -1,309 +0,0 @@ -from bson import ObjectId -from flask import request, Blueprint, jsonify -from flask_cors import cross_origin -from SC4SNMP_UI_backend import mongo_client -from SC4SNMP_UI_backend.common.conversions import ProfileConversion, GroupConversion, GroupDeviceConversion, \ - InventoryConversion, get_group_name_from_backend -from copy import copy -from SC4SNMP_UI_backend.ui_handling.helpers import update_profiles_in_inventory, check_if_inventory_can_be_added, \ - InventoryAddEdit - -ui = Blueprint('ui', __name__) - -profile_conversion = ProfileConversion() -group_conversion = GroupConversion() -group_device_conversion = GroupDeviceConversion() -inventory_conversion = InventoryConversion() -mongo_profiles = mongo_client.sc4snmp.profiles_ui -mongo_groups = mongo_client.sc4snmp.groups_ui -mongo_inventory = mongo_client.sc4snmp.inventory_ui - - -# @cross_origin(origins='*', headers=['access-control-allow-origin', 'Content-Type']) -@ui.route('/profiles/names') -@cross_origin() -def get_profile_names(): - profiles = list(mongo_profiles.find()) - profiles_list = [] - for pr in profiles: - converted = profile_conversion.backend2ui(pr) - if converted['conditions']['condition'] not in ['mandatory', 'base']: - profiles_list.append(converted) - return jsonify([el["profileName"] for el in profiles_list]) - 
-@ui.route('/profiles/count') -@cross_origin() -def get_profiles_count(): - total_count = mongo_profiles.count_documents({}) - return jsonify(total_count) - -@ui.route('/profiles/<page_num>/<prof_per_page>') -@cross_origin() -def get_profiles_list(page_num, prof_per_page): - page_num = int(page_num) - prof_per_page = int(prof_per_page) - skips = prof_per_page * (page_num - 1) - - profiles = list(mongo_profiles.find().skip(skips).limit(prof_per_page)) - profiles_list = [] - for pr in profiles: - converted = profile_conversion.backend2ui(pr) - if converted['conditions']['condition'] not in ['mandatory']: - profiles_list.append(converted) - return jsonify(profiles_list) - - -@ui.route('/profiles') -@cross_origin() -def get_all_profiles_list(): - profiles = list(mongo_profiles.find()) - profiles_list = [] - for pr in profiles: - converted = profile_conversion.backend2ui(pr) - if converted['conditions']['condition'] not in ['mandatory']: - profiles_list.append(converted) - return jsonify(profiles_list) - - -@ui.route('/profiles/add', methods=['POST']) -@cross_origin() -def add_profile_record(): - profile_obj = request.json - same_name_profiles = list(mongo_profiles.find({f"{profile_obj['profileName']}": {"$exists": True}})) - if len(same_name_profiles) > 0: - result = jsonify( - {"message": f"Profile with name {profile_obj['profileName']} already exists. Profile was not added."}), 400 - else: - profile_obj = profile_conversion.ui2backend(profile_obj) - mongo_profiles.insert_one(profile_obj) - result = jsonify("success") - return result - -@ui.route('/profiles/delete/<profile_id>', methods=['POST']) -@cross_origin() -def delete_profile_record(profile_id): - profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] - profile_name = list(profile.keys())[0] - - # Find records from inventory where this profile was used. - def delete_profile(index, record_to_update, kwargs): - record_to_update["profiles"].pop(index) - return record_to_update - update_profiles_in_inventory(profile_name, delete_profile) - - mongo_profiles.delete_one({'_id': ObjectId(profile_id)}) - return jsonify({"message": f"If {profile_name} was used in some records in the inventory," - f" those records were updated"}), 200 - - -@ui.route('/profiles/update/<profile_id>', methods=['POST']) -@cross_origin() -def update_profile_record(profile_id): - profile_obj = request.json - new_profile_name = profile_obj['profileName'] - - same_name_profiles = list(mongo_profiles.find({f"{new_profile_name}": {"$exists": True}, "_id": {"$ne": ObjectId(profile_id)}})) - if len(same_name_profiles) > 0: - return jsonify( - {"message": f"Profile with name {new_profile_name} already exists. 
Profile was not edited."}), 400 - - profile_obj = profile_conversion.ui2backend(profile_obj) - - old_profile = list(mongo_profiles.find({'_id': ObjectId(profile_id)}, {"_id": 0}))[0] - old_profile_name = list(old_profile.keys())[0] - - # If profile name was changed update it and also update all inventory records where this profile is used - if old_profile_name != new_profile_name: - mongo_profiles.update_one({'_id': ObjectId(profile_id)}, - {"$rename": {f"{old_profile_name}": f"{new_profile_name}"}}) - - def update_name(index, record_to_update, kwargs): - record_to_update["profiles"][index] = kwargs["new_name"] - return record_to_update - update_profiles_in_inventory(old_profile_name, update_name, new_name=new_profile_name) - - result = jsonify({"message": f"If {old_profile_name} was used in some records in the inventory," - f" it was updated to {new_profile_name}"}), 200 - else: - result = jsonify("success"), 200 - - mongo_profiles.update_one({'_id': ObjectId(profile_id)}, - {"$set": {new_profile_name: profile_obj[new_profile_name]}}) - return result - - -@ui.route('/groups') -@cross_origin() -def get_groups_list(): - groups = mongo_groups.find() - groups_list = [] - for gr in list(groups): - groups_list.append(group_conversion.backend2ui(gr)) - return jsonify(groups_list) - - -@ui.route('/groups/add', methods=['POST']) -@cross_origin() -def add_group_record(): - group_obj = request.json - same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) - if len(same_name_groups) > 0: - result = jsonify( - {"message": f"Group with name {group_obj['groupName']} already exists. Group was not added."}), 400 - else: - group_obj = group_conversion.ui2backend(group_obj) - mongo_groups.insert_one(group_obj) - result = jsonify("success") - return result - - -@ui.route('/groups/update/', methods=['POST']) -@cross_origin() -def update_group(group_id): - group_obj = request.json - same_name_groups = list(mongo_groups.find({f"{group_obj['groupName']}": {"$exists": True}})) - if len(same_name_groups) > 0: - result = jsonify( - {"message": f"Group with name {group_obj['groupName']} already exists. 
Group was not edited."}), 400 - else: - old_group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] - old_group_name = get_group_name_from_backend(old_group) - mongo_groups.update_one({'_id': old_group['_id']}, {"$rename": {f"{old_group_name}": f"{group_obj['groupName']}"}}) - - # Rename corresponding group in the inventory - mongo_inventory.update_one({"address": old_group_name}, {"$set": {"address": group_obj['groupName']}}) - result = jsonify({"message": f"{old_group_name} was also renamed to {group_obj['groupName']} in the inventory"}), 200 - return result - - -@ui.route('/groups/delete/', methods=['POST']) -@cross_origin() -def delete_group_and_devices(group_id): - group = list(mongo_groups.find({'_id': ObjectId(group_id)}))[0] - group_name = get_group_name_from_backend(group) - with mongo_client.start_session() as session: - with session.start_transaction(): - mongo_groups.delete_one({'_id': ObjectId(group_id)}) - mongo_inventory.update_one({"address": group_name}, {"$set": {"delete": True}}) - return jsonify({"message": f"If {group_name} was configured in the inventory, it was deleted from there"}), 200 - - -@ui.route('/group//devices/count') -@cross_origin() -def get_devices_count_for_group(group_id): - group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] - group_name = get_group_name_from_backend(group) - total_count = len(group[group_name]) - return jsonify(total_count) - - -@ui.route('/group//devices//') -@cross_origin() -def get_devices_of_group(group_id, page_num, dev_per_page): - page_num = int(page_num) - dev_per_page = int(dev_per_page) - skips = dev_per_page * (page_num - 1) - group = list(mongo_groups.find({"_id": ObjectId(group_id)}))[0] - - group_name = get_group_name_from_backend(group) - devices_list = [] - for i, device in enumerate(group[group_name]): - devices_list.append(group_device_conversion.backend2ui(device, group_id=group_id, device_id=copy(i))) - devices_list = devices_list[skips:skips+dev_per_page] - return jsonify(devices_list) - - -@ui.route('/devices/add', methods=['POST']) -@cross_origin() -def add_device_to_group(): - device_obj = request.json - group_id = device_obj["groupId"] - group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] - device_obj = group_device_conversion.ui2backend(device_obj) - - new_device_port = device_obj.get('port', -1) - group_name = get_group_name_from_backend(group) - for device in group[group_name]: - old_device_port = device.get('port', -1) - if device["address"] == device_obj["address"] and old_device_port == new_device_port: - return jsonify( - {"message": f"Device {device_obj['address']}:{device_obj.get('port', '')} already exists. 
" - f"Record was not added"}), 400 - - group[group_name].append(device_obj) - new_values = {"$set": group} - mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) - return jsonify("success") - - -@ui.route('/devices/update/', methods=['POST']) -@cross_origin() -def update_device_from_group(device_id): - device_obj = request.json - group_id = device_id.split("-")[0] - device_id = device_id.split("-")[1] - group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] - device_obj = group_device_conversion.ui2backend(device_obj) - - group_name = get_group_name_from_backend(group) - group[group_name][int(device_id)] = device_obj - new_values = {"$set": group} - mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) - return jsonify("success") - - -@ui.route('/devices/delete/', methods=['POST']) -@cross_origin() -def delete_device_from_group_record(device_id: str): - group_id = device_id.split("-")[0] - device_id = device_id.split("-")[1] - group = list(mongo_groups.find({'_id': ObjectId(group_id)}, {"_id": 0}))[0] - group_name = get_group_name_from_backend(group) - group[group_name].pop(int(device_id)) - new_values = {"$set": group} - mongo_groups.update_one({"_id": ObjectId(group_id)}, new_values) - return jsonify("success") - - -@ui.route('/inventory//') -@cross_origin() -def get_inventory_list(page_num, dev_per_page): - page_num = int(page_num) - dev_per_page = int(dev_per_page) - skips = dev_per_page * (page_num - 1) - - inventory = list(mongo_inventory.find({"delete": False}).skip(skips).limit(dev_per_page)) - inventory_list = [] - for inv in inventory: - inventory_list.append(inventory_conversion.backend2ui(inv)) - return jsonify(inventory_list) - - -@ui.route('/inventory/count') -@cross_origin() -def get_inventory_count(): - total_count = mongo_inventory.count_documents({"delete": False}) - return jsonify(total_count) - - -@ui.route('/inventory/add', methods=['POST']) -@cross_origin() -def add_inventory_record(): - inventory_obj = request.json - inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) - return check_if_inventory_can_be_added(inventory_obj, InventoryAddEdit.ADD, None) - - -@ui.route('/inventory/delete/', methods=['POST']) -@cross_origin() -def delete_inventory_record(inventory_id): - mongo_inventory.update_one({"_id": ObjectId(inventory_id)}, {"$set": {"delete": True}}) - return jsonify("success") - - -@ui.route('/inventory/update/', methods=['POST']) -@cross_origin() -def update_inventory_record(inventory_id): - inventory_obj = request.json - inventory_obj = inventory_conversion.ui2backend(inventory_obj, delete=False) - return check_if_inventory_can_be_added(inventory_obj, InventoryAddEdit.EDIT, inventory_id) diff --git a/backend/app.py b/backend/app.py index a072c18..759eedd 100644 --- a/backend/app.py +++ b/backend/app.py @@ -1,6 +1,7 @@ from SC4SNMP_UI_backend import create_app -app = create_app() +flask_app = create_app() +celery_app = flask_app.extensions["celery"] if __name__ == '__main__': - app.run() + flask_app.run() diff --git a/backend/celery_start.sh b/backend/celery_start.sh new file mode 100644 index 0000000..0628580 --- /dev/null +++ b/backend/celery_start.sh @@ -0,0 +1,5 @@ +set -o errexit +set -o nounset + +cd /app +celery -A app worker -Q apply_changes --loglevel INFO \ No newline at end of file diff --git a/backend/flask_start.sh b/backend/flask_start.sh new file mode 100644 index 0000000..38d5ce4 --- /dev/null +++ b/backend/flask_start.sh @@ -0,0 +1,4 @@ +set -o errexit +set -o nounset +cd /app 
+gunicorn -b :5000 app:flask_app --log-level INFO \ No newline at end of file diff --git a/backend/package-lock.json b/backend/package-lock.json deleted file mode 100644 index d8fb6a1..0000000 --- a/backend/package-lock.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "name": "flask_inventory", - "lockfileVersion": 2, - "requires": true, - "packages": { - "": { - "dependencies": { - "axios": "^0.27.2" - } - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "node_modules/axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", - "dependencies": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" - } - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/follow-redirects": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", - "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - } - }, - "dependencies": { - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "axios": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz", - "integrity": 
"sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==", - "requires": { - "follow-redirects": "^1.14.9", - "form-data": "^4.0.0" - } - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" - }, - "follow-redirects": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz", - "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==" - }, - "form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - } - }, - "mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" - }, - "mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "requires": { - "mime-db": "1.52.0" - } - } - } -} diff --git a/backend/package.json b/backend/package.json deleted file mode 100644 index 9cf5ca6..0000000 --- a/backend/package.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "dependencies": { - "axios": "^0.27.2" - } -} diff --git a/backend/requirements.txt b/backend/requirements.txt index f676171..620a31d 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -7,5 +7,11 @@ MarkupSafe==2.1.1 pymongo==4.1.1 six==1.16.0 Werkzeug==2.2.3 -pytest +pytest~=7.2.0 gunicorn +kubernetes~=26.1.0 +python-dotenv~=0.21.0 +PyYAML~=6.0 +celery==5.2.7 +redis==4.5.5 +ruamel.yaml===0.17.32 \ No newline at end of file diff --git a/backend/tests/common/__init__.py b/backend/tests/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/common/test_conversions.py b/backend/tests/common/test_backend_ui_conversions.py similarity index 66% rename from backend/tests/common/test_conversions.py rename to backend/tests/common/test_backend_ui_conversions.py index ac19844..b1ef9d6 100644 --- a/backend/tests/common/test_conversions.py +++ b/backend/tests/common/test_backend_ui_conversions.py @@ -1,5 +1,5 @@ from unittest import TestCase -from SC4SNMP_UI_backend.common.conversions import ProfileConversion, GroupConversion, GroupDeviceConversion, \ +from SC4SNMP_UI_backend.common.backend_ui_conversions import ProfileConversion, GroupConversion, GroupDeviceConversion, \ InventoryConversion from bson import ObjectId @@ -13,6 +13,7 @@ class TestConversions(TestCase): @classmethod def setUpClass(cls): + cls.maxDiff = None common_id = "635916b2c8cb7a15f28af40a" cls.ui_prof_1 = { @@ -20,13 +21,16 @@ def setUpClass(cls): "profileName": "profile_1", "frequency": 10, "conditions": { - 
"condition": "None", + "condition": "standard", "field": "", - "patterns": None + "patterns": [], + "conditions": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1.test.2"}, + {"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": True } cls.ui_prof_2 = { @@ -36,11 +40,13 @@ def setUpClass(cls): "conditions": { "condition": "base", "field": "", - "patterns": None + "patterns": [], + "conditions": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": False } cls.ui_prof_3 = { @@ -48,20 +54,46 @@ def setUpClass(cls): "profileName": "profile_3", "frequency": 30, "conditions": { - "condition": "field", + "condition": "smart", "field": "SNMPv2-MIB.sysObjectID", - "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}], + "conditions": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": True + } + + cls.ui_prof_4 = { + "_id": common_id, + "profileName": "profile_4", + "frequency": 30, + "conditions": { + "condition": "conditional", + "field": "", + "patterns": [], + "conditions": [ + {"field": "field: IF-MIB.ifAdminStatus", "operation": "in", "value":["0", "down"], 'negateOperation': False,}, + {"field": "field: IF-MIB.ifOperStatus", "operation": "equals", "value": ["up"], 'negateOperation': True,}, + {"field": "field: IF-MIB.ifIndex", "operation": "less than", "value": ["3"], 'negateOperation': False,}, + {"field": "field: IF-MIB.ifIndex", "operation": "greater than", "value": ["5"], 'negateOperation': True,} + ] + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + "profileInInventory": False } cls.backend_prof_1 = { "_id": ObjectId(common_id), "profile_1": { "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + "varBinds": [["IF-MIB", "ifInDiscards", "1", "test", "2"], + ["IF-MIB", "ifInDiscards", "1"], + ["IF-MIB"], + ["IF-MIB", "ifOutErrors"]] } } @@ -70,7 +102,7 @@ def setUpClass(cls): "profile_2": { "frequency": 20, "condition": {"type": "base"}, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] } } @@ -81,7 +113,21 @@ def setUpClass(cls): "condition": {"type": "field", "field": 
"SNMPv2-MIB.sysObjectID", "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + cls.backend_prof_4 = { + "_id": ObjectId(common_id), + "profile_4": { + "frequency": 30, + "conditions": [ + {"field": "field: IF-MIB.ifAdminStatus", "operation": "in", "value": [0, "down"]}, + {"field": "field: IF-MIB.ifOperStatus", "operation": "equals", "value": "up", 'negate_operation': True}, + {"field": "field: IF-MIB.ifIndex", "operation": "lt", "value": 3}, + {"field": "field: IF-MIB.ifIndex", "operation": "gt", "value": 5, 'negate_operation': True} + ], + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] } } @@ -97,7 +143,8 @@ def setUpClass(cls): cls.ui_group = { "_id": common_id, - "groupName": "group_1" + "groupName": "group_1", + "groupInInventory": False } cls.ui_group_device_1 = { @@ -160,6 +207,7 @@ def setUpClass(cls): cls.ui_inventory_1 = { "_id": common_id, + "inventoryType": "Host", "address": "11.0.78.114", "port": "161", "version": "3", @@ -187,6 +235,7 @@ def setUpClass(cls): cls.ui_inventory_2 = { "_id": common_id, + "inventoryType": "Group", "address": "group_1", "port": "1161", "version": "2c", @@ -199,9 +248,10 @@ def setUpClass(cls): } def test_profile_backend_to_ui(self): - self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_1), self.ui_prof_1) - self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_2), self.ui_prof_2) - self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_3), self.ui_prof_3) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_1, profile_in_inventory=True), self.ui_prof_1) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_2, profile_in_inventory=False), self.ui_prof_2) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_3, profile_in_inventory=True), self.ui_prof_3) + self.assertDictEqual(profile_conversion.backend2ui(self.backend_prof_4, profile_in_inventory=False), self.ui_prof_4) def test_profile_ui_to_backend(self): back_pr1 = self.backend_prof_1 @@ -212,12 +262,17 @@ def test_profile_ui_to_backend(self): back_pr3 = self.backend_prof_3 del back_pr3["_id"] + + back_pr4 = self.backend_prof_4 + del back_pr4["_id"] + self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_1), back_pr1) self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_2), back_pr2) self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_3), back_pr3) + self.assertDictEqual(profile_conversion.ui2backend(self.ui_prof_4), back_pr4) def test_group_backend_to_ui(self): - self.assertDictEqual(group_conversion.backend2ui(self.backend_group), self.ui_group) + self.assertDictEqual(group_conversion.backend2ui(self.backend_group, group_in_inventory=False), self.ui_group) def test_group_ui_to_backend(self): new_group_from_ui = { @@ -267,8 +322,8 @@ def test_group_device_ui_to_backend(self): self.assertDictEqual(group_device_conversion.ui2backend(self.ui_group_device_4), device) def test_inventory_backend_to_ui(self): - self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_1), self.ui_inventory_1) - self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_2), self.ui_inventory_2) + self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_1, inventory_type="Host"), self.ui_inventory_1) + 
self.assertDictEqual(inventory_conversion.backend2ui(self.backend_inventory_2, inventory_type="Group"), self.ui_inventory_2) def test_inventory_ui_to_backend(self): back_inv = self.backend_inventory_1 diff --git a/backend/tests/ui_handling/__init__.py b/backend/tests/ui_handling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/ui_handling/create_job_object.py b/backend/tests/ui_handling/create_job_object.py new file mode 100644 index 0000000..fa5d0b4 --- /dev/null +++ b/backend/tests/ui_handling/create_job_object.py @@ -0,0 +1,173 @@ +import yaml +from kubernetes import client +from SC4SNMP_UI_backend.apply_changes.kubernetes_job import create_job_object + +JOB_CONFIGURATION_YAML = yaml.safe_load(""" +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-splunk-connect-for-snmp-inventory + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-1.9.0 + app.kubernetes.io/version: "1.9.0" + app.kubernetes.io/managed-by: Helm +spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + annotations: + imageregistry: https://hub.docker.com/ + + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-inventory + app.kubernetes.io/instance: release-name + spec: + imagePullSecrets: + - name: myregistrykey + containers: + - name: splunk-connect-for-snmp-inventory + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:1.9.0" + imagePullPolicy: Always + args: + ["inventory"] + env: + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: REDIS_URL + value: redis://release-name-redis-headless:6379/1 + - name: INVENTORY_PATH + value: /app/inventory/inventory.csv + - name: CELERY_BROKER_URL + value: redis://release-name-redis-headless:6379/0 + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: LOG_LEVEL + value: INFO + - name: CONFIG_FROM_MONGO + value: "true" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: inventory + mountPath: "/app/inventory" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: inventory + configMap: + # Provide the name of the ConfigMap you want to mount. 
+ name: splunk-connect-for-snmp-inventory + # An array of keys from the ConfigMap to create as files + items: + - key: "inventory.csv" + path: "inventory.csv" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + restartPolicy: OnFailure +""") + +def test_create_job_object(): + expected_job = client.V1Job( + api_version="batch/v1", + kind="Job", + metadata=client.V1ObjectMeta( + name="release-name-splunk-connect-for-snmp-inventory", + labels={ + "app.kubernetes.io/name": "splunk-connect-for-snmp-inventory", + "app.kubernetes.io/instance": "release-name", + "helm.sh/chart": "splunk-connect-for-snmp-1.9.0", + "app.kubernetes.io/version": "1.9.0", + "app.kubernetes.io/managed-by": "Helm" + } + ), + spec=client.V1JobSpec( + ttl_seconds_after_finished=300, + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + annotations={ + "imageregistry": "https://hub.docker.com/" + }, + labels={ + "app.kubernetes.io/name": "splunk-connect-for-snmp-inventory", + "app.kubernetes.io/instance": "release-name" + } + ), + spec=client.V1PodSpec( + image_pull_secrets=[client.V1LocalObjectReference(name="myregistrykey")], + containers=[ + client.V1Container( + name="splunk-connect-for-snmp-inventory", + image="ghcr.io/splunk/splunk-connect-for-snmp/container:1.9.0", + image_pull_policy="Always", + args=["inventory"], + env=[ + client.V1EnvVar(name="CONFIG_PATH",value="/app/config/config.yaml"), + client.V1EnvVar(name="REDIS_URL", value="redis://release-name-redis-headless:6379/1"), + client.V1EnvVar(name="INVENTORY_PATH", value="/app/inventory/inventory.csv"), + client.V1EnvVar(name="CELERY_BROKER_URL", value="redis://release-name-redis-headless:6379/0"), + client.V1EnvVar(name="MONGO_URI", value="mongodb://release-name-mongodb:27017"), + client.V1EnvVar(name="MIB_SOURCES", value="http://release-name-mibserver/asn1/@mib@"), + client.V1EnvVar(name="MIB_INDEX", value="http://release-name-mibserver/index.csv"), + client.V1EnvVar(name="MIB_STANDARD", value="http://release-name-mibserver/standard.txt"), + client.V1EnvVar(name="LOG_LEVEL", value="INFO"), + client.V1EnvVar(name="CONFIG_FROM_MONGO", value="true") + ], + volume_mounts=[ + client.V1VolumeMount(name="config", mount_path="/app/config", read_only=True), + client.V1VolumeMount(name="inventory", mount_path="/app/inventory", read_only=True), + client.V1VolumeMount(name="pysnmp-cache-volume", mount_path="/.pysnmp/", read_only=False), + client.V1VolumeMount(name="tmp", mount_path="/tmp/", read_only=False), + ] + ) + ], + volumes=[ + client.V1Volume(name="config", + config_map=client.V1ConfigMapVolumeSource( + name="splunk-connect-for-snmp-config", + items=[ + client.V1KeyToPath(key="config.yaml",path="config.yaml") + ] + )), + client.V1Volume(name="inventory", + config_map=client.V1ConfigMapVolumeSource( + name="splunk-connect-for-snmp-inventory", + items=[ + client.V1KeyToPath(key="inventory.csv", path="inventory.csv") + ] + )), + client.V1Volume(name="pysnmp-cache-volume", empty_dir=client.V1EmptyDirVolumeSource()), + client.V1Volume(name="tmp", empty_dir=client.V1EmptyDirVolumeSource()) + ], + restart_policy="OnFailure" + ) + ) + ) + ) + + assert create_job_object(JOB_CONFIGURATION_YAML) == expected_job \ No newline at end of file diff --git a/backend/tests/ui_handling/get_endpoints/__init__.py b/backend/tests/ui_handling/get_endpoints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/ui_handling/test_get_endpoints.py b/backend/tests/ui_handling/get_endpoints/test_get_endpoints.py similarity 
index 88% rename from backend/tests/ui_handling/test_get_endpoints.py rename to backend/tests/ui_handling/get_endpoints/test_get_endpoints.py index e293a6b..9f9848e 100644 --- a/backend/tests/ui_handling/test_get_endpoints.py +++ b/backend/tests/ui_handling/get_endpoints/test_get_endpoints.py @@ -1,7 +1,6 @@ from unittest import mock from bson import ObjectId - @mock.patch("pymongo.collection.Collection.find") def test_get_profile_names(m_client, client): m_client.return_value = [{ @@ -53,13 +52,15 @@ def test_get_all_profiles_list(m_client, client): "profileName": "profile_1", "frequency": 10, "conditions": { - "condition": "None", + "condition": "standard", + "conditions": [], "field": "", - "patterns": None + "patterns": [] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + 'profileInInventory': True, } ui_prof_2 = { @@ -67,13 +68,15 @@ def test_get_all_profiles_list(m_client, client): "profileName": "profile_2", "frequency": 30, "conditions": { - "condition": "field", + "condition": "smart", + "conditions": [], "field": "SNMPv2-MIB.sysObjectID", "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}], + 'profileInInventory': True, } response = client.get('/profiles') @@ -84,8 +87,9 @@ def test_get_all_profiles_list(m_client, client): @mock.patch("pymongo.collection.Collection.find") def test_get_groups_list(m_client, client): common_id = "635916b2c8cb7a15f28af40a" - m_client.return_value = [ - { + + m_client.side_effect = [ + [{ "_id": common_id, "group_1": [ {"address": "1.2.3.4"} @@ -96,17 +100,21 @@ def test_get_groups_list(m_client, client): "group_2": [ {"address": "1.2.3.4"} ] - } + }], + [], + [{"address": "group_2"}] ] expected_groups = [ { "_id": common_id, - "groupName": "group_1" + "groupName": "group_1", + "groupInInventory": False }, { "_id": common_id, - "groupName": "group_2" + "groupName": "group_2", + "groupInInventory": True } ] @@ -255,8 +263,9 @@ def test_get_devices_of_group(m_client, client): assert response.json == third_result +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") @mock.patch("pymongo.cursor.Cursor.limit") -def test_get_inventory_list(m_cursor, client): +def test_get_inventory_list(m_cursor, m_get_inventory_type, client): common_id = "635916b2c8cb7a15f28af40a" m_cursor.side_effect = [ @@ -305,9 +314,12 @@ def test_get_inventory_list(m_cursor, client): ] ] + m_get_inventory_type.side_effect = ["Host", "Group", "Group"] + first_result = [ { "_id": common_id, + "inventoryType": "Host", "address": "11.0.78.114", "port": "161", "version": "3", @@ -320,6 +332,7 @@ def test_get_inventory_list(m_cursor, client): }, { "_id": common_id, + "inventoryType": "Group", "address": "group_1", "port": "1161", "version": "2c", @@ -335,6 +348,7 @@ def test_get_inventory_list(m_cursor, client): 
second_result = [ { "_id": common_id, + "inventoryType": "Group", "address": "group_2", "port": "161", "version": "3", diff --git a/backend/tests/ui_handling/post_endpoints/__init__.py b/backend/tests/ui_handling/post_endpoints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py b/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py new file mode 100644 index 0000000..58cebec --- /dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_apply_changes.py @@ -0,0 +1,299 @@ +from unittest import mock +from unittest.mock import call +from bson import ObjectId +from copy import copy +import ruamel.yaml +import datetime +import os +from SC4SNMP_UI_backend.apply_changes.handling_chain import TMP_FILE_PREFIX + +VALUES_TEST_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../yamls_for_tests/values_test") +REFERENCE_FILES_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../../yamls_for_tests/reference_files") + +def return_generated_and_reference_files(): + reference_files_names = ["poller_inventory.yaml", "scheduler_profiles.yaml", "scheduler_groups.yaml"] + reference_files = [] + generated_files = [] + yaml = ruamel.yaml.YAML() + + for file_name in reference_files_names: + # load each reference file and its generated temporary counterpart + reference_file_path = os.path.join(REFERENCE_FILES_DIRECTORY, file_name) + with open(reference_file_path, "r") as file: + data = yaml.load(file) + reference_files.append(copy(data)) + + generated_file_path = os.path.join(VALUES_TEST_DIRECTORY, f"{TMP_FILE_PREFIX}{file_name}") + with open(generated_file_path, "r") as file: + data = yaml.load(file) + generated_files.append(copy(data)) + + # load the original and the edited values.yaml files + edited_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values.yaml") + original_values_path = os.path.join(REFERENCE_FILES_DIRECTORY, "values.yaml") + with open(original_values_path, "r") as file: + data = yaml.load(file) + reference_files.append(copy(data)) + with open(edited_values_path, "r") as file: + data = yaml.load(file) + generated_files.append(copy(data)) + return reference_files, generated_files + +def delete_generated_files(): + reference_files_names = ["poller_inventory.yaml", "scheduler_profiles.yaml", "scheduler_groups.yaml"] + for file_name in reference_files_names: + generated_file_path = os.path.join(VALUES_TEST_DIRECTORY, f"{TMP_FILE_PREFIX}{file_name}") + if os.path.exists(generated_file_path): + os.remove(generated_file_path) + +def reset_generated_values(): + edited_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values.yaml") + original_values_path = os.path.join(VALUES_TEST_DIRECTORY, "values-before-edit.yaml") + yaml = ruamel.yaml.YAML() + with open(original_values_path, "r") as file: + original_data = yaml.load(file) + with open(edited_values_path, "w") as file: + yaml.dump(original_data, file) + + +common_id = "635916b2c8cb7a15f28af40a" + +groups_collection = [ + { + "_id": ObjectId(common_id), + "group1": [ + {"address": "52.14.243.157", "port": 1163}, + {"address": "20.14.10.0", "port": 161}, + ], + }, + { + "_id": ObjectId(common_id), + "group2": [ + {"address": "0.10.20.30"}, + {"address": "52.14.243.157", "port": 1165, "version": "3", "secret": "mysecret", "security_engine": "aabbccdd1234"}, + ] + } +] + +profiles_collection = [ + { + "_id": ObjectId(common_id), + "single_metric":{ + "frequency": 60, + "varBinds":[['IF-MIB', 'ifMtu', '1']] + } + }, + { + "_id": ObjectId(common_id), + "small_walk":{ + "condition":{ +
"type": "walk" + }, + "varBinds":[['IP-MIB'],['IF-MIB']] + } + }, + { + "_id": ObjectId(common_id), + "gt_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "gt", "value": 1} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "lt_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "lt", "value": 2} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "in_profile":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifDescr", "operation": "in", "value": ["eth0", "test value"]} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards']] + } + }, + { + "_id": ObjectId(common_id), + "multiple_conditions":{ + "frequency": 10, + "conditions":[ + {"field": "IF-MIB.ifIndex", "operation": "gt", "value": 1}, + {"field": "IF-MIB.ifDescr", "operation": "in", "value": ["eth0", "test value"]} + ], + "varBinds":[['IF-MIB', 'ifOutDiscards'],['IF-MIB', 'ifOutErrors'],['IF-MIB', 'ifOutOctets']] + } + } +] + +inventory_collection = [ + { + "_id": ObjectId(common_id), + "address": "1.1.1.1", + "port": 161, + "version": "2c", + "community": "public", + "secret": "", + "security_engine": "", + "walk_interval": 1800, + "profiles": "small_walk;in_profile", + "smart_profiles": True, + "delete": False + }, + { + "_id": ObjectId(common_id), + "address": "group1", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "security_engine": "", + "walk_interval": 1800, + "profiles": "single_metric;multiple_conditions", + "smart_profiles": False, + "delete": False + } +] + +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_FILE", "values.yaml") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.KEEP_TEMP_FILES", "true") +@mock.patch("datetime.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_apply_changes_first_call(m_find, m_update, m_run_job, m_datetime, client): + datetime_object = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) + m_datetime.utcnow = mock.Mock(return_value=datetime_object) + collection = { + "_id": ObjectId(common_id), + "previous_job_start_time": None, + "currently_scheduled": False + } + m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler + [collection], + [collection], + [collection] + ] + calls_find = [ + call(), + call(), + call() + ] + calls_update = [ + call({"_id": ObjectId(common_id)},{"$set": {"previous_job_start_time": datetime_object}}), + call({"_id": ObjectId(common_id)},{"$set": {"currently_scheduled": True}}) + ] + apply_async_calls = [ + call(countdown=300, queue='apply_changes') + ] + + m_run_job.apply_async.return_value = None + m_update.return_value = None + + response = client.post("/apply-changes") + m_find.assert_has_calls(calls_find) + m_update.assert_has_calls(calls_update) + m_run_job.apply_async.assert_has_calls(apply_async_calls) + assert response.json == {"message": "Configuration will be updated in approximately 300 seconds."} + reference_files, generated_files = 
return_generated_and_reference_files() + for ref_f, gen_f in zip(reference_files, generated_files): + assert ref_f == gen_f + delete_generated_files() + reset_generated_values() + +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_apply_changes_job_currently_scheduled(m_find, m_update, m_run_job, m_datetime, client): + datetime_object_old = datetime.datetime(2020, 7, 10, 10, 27, 10, 0) + datetime_object_new = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) + m_datetime.datetime.utcnow = mock.Mock(return_value=datetime_object_new) + collection = { + "_id": ObjectId(common_id), + "previous_job_start_time": datetime_object_old, + "currently_scheduled": True + } + m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler + [collection], + [collection], + [collection] + ] + calls_find = [ + call(), + call(), + call() + ] + m_run_job.apply_async.return_value = None + m_update.return_value = None + + response = client.post("/apply-changes") + m_find.assert_has_calls(calls_find) + assert not m_run_job.apply_async.called + assert response.json == {"message": "Configuration will be updated in approximately 130 seconds."} + delete_generated_files() + reset_generated_values() + + +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.VALUES_DIRECTORY", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.TMP_DIR", VALUES_TEST_DIRECTORY) +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.datetime") +@mock.patch("SC4SNMP_UI_backend.apply_changes.handling_chain.run_job") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_apply_changes_new_job_delay_1(m_find, m_update, m_run_job, m_datetime, client): + datetime_object_old = datetime.datetime(2020, 7, 10, 10, 20, 0, 0) + datetime_object_new = datetime.datetime(2020, 7, 10, 10, 30, 0, 0) + m_datetime.datetime.utcnow = mock.Mock(return_value=datetime_object_new) + collection = { + "_id": ObjectId(common_id), + "previous_job_start_time": datetime_object_old, + "currently_scheduled": False + } + m_find.side_effect = [ + groups_collection, # call from SaveConfigToFileHandler + profiles_collection, # call from SaveConfigToFileHandler + inventory_collection, # call from SaveConfigToFileHandler + [collection], + [collection], + [collection] + ] + calls_find = [ + call(), + call(), + call() + ] + apply_async_calls = [ + call(countdown=1, queue='apply_changes') + ] + + m_run_job.apply_async.return_value = None + m_update.return_value = None + + response = client.post("/apply-changes") + m_find.assert_has_calls(calls_find) + m_run_job.apply_async.assert_has_calls(apply_async_calls) + assert response.json == {"message": "Configuration will be updated in approximately 1 seconds."} + delete_generated_files() + reset_generated_values() diff --git a/backend/tests/ui_handling/post_endpoints/test_post_groups.py b/backend/tests/ui_handling/post_endpoints/test_post_groups.py new file mode 100644 index 0000000..be59eda --- 
/dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_groups.py @@ -0,0 +1,579 @@ +from unittest import mock +from unittest.mock import call, Mock +from bson import ObjectId + +common_id = "635916b2c8cb7a15f28af40a" + +# TEST ADDING GROUP +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_record_success(m_find, m_insert, client): + ui_group = { + "groupName": "group_1" + } + + backend_group = { + "group_1": [] + } + + find_calls = [ + call({'address': 'group_1', 'delete': False}), + call({"group_1": {"$exists": True}}), + ] + m_find.side_effect = [[],[]] + m_insert.return_value = None + + response = client.post(f"/groups/add", json=ui_group) + m_find.assert_has_calls(find_calls) + assert m_insert.call_args == call(backend_group) + assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_record_with_already_existing_name_failure(m_find, m_insert, client): + ui_group = { + "groupName": "group_1" + } + + backend_group = { + "group_1": [] + } + + m_find.side_effect = [ + [backend_group] + ] + + response = client.post(f"/groups/add", json=ui_group) + assert not m_insert.called + assert response.json == {"message": "Group with name group_1 already exists. Group was not added."} + +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_record_with_name_existing_in_inventory_as_hostname_failure(m_find, m_insert, client): + ui_group = { + "groupName": "test" + } + + m_find.side_effect = [ + [], + [{"address": "test"}] + ] + + response = client.post(f"/groups/add", json=ui_group) + assert not m_insert.called + assert response.json == {"message": "In the inventory there is a record with name test. Group was not added."} + +# TEST UPDATING GROUP +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_success(m_find, m_update, client): + + ui_group_new = { + "_id": common_id, + "groupName": "group_1_edit" + } + + backend_group_old = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4"}, + ] + } + + calls_update = [ + call({'_id': ObjectId(common_id)}, {"$rename": {"group_1": "group_1_edit"}}), + call({"address": "group_1"}, {"$set": {"address": 'group_1_edit'}}) + ] + + m_find.side_effect = [ + [], + [], + [backend_group_old] + ] + + response = client.post(f"/groups/update/{common_id}", json=ui_group_new) + m_update.assert_has_calls(calls_update) + assert response.json == {"message": "group_1 was also renamed to group_1_edit in the inventory"} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_failure(m_find, m_update, client): + ui_group_new = { + "_id": common_id, + "groupName": "group_1_edit" + } + + backend_group_old = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4"}, + ] + } + + backend_group_existing = { + "_id": ObjectId(common_id), + "group_1_edit": [ + {"address": "1.2.3.4"}, + ] + } + + m_find.side_effect = [ + [backend_group_old], + [backend_group_existing] + ] + + response = client.post(f"/groups/update/{common_id}", json=ui_group_new) + assert not m_update.called + assert response.json == {"message": "Group with name group_1_edit already exists.
Group was not edited."} + +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_record_with_name_existing_in_inventory_as_hostname_failure(m_find, m_insert, client): + ui_group = { + "_id": common_id, + "groupName": "test" + } + + m_find.side_effect = [ + [], + [{"address": "test"}] + ] + + response = client.post(f"/groups/update/{common_id}", json=ui_group) + assert not m_insert.called + assert response.json == {"message": "In the inventory there is a record with name test. Group was not edited."} + +# TEST DELETING GROUP +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.MongoClient.start_session") +def test_delete_group_and_devices(m_session, m_update, m_delete, m_find, client): + backend_group = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4"}, + ] + } + m_session.return_value.__enter__.return_value.start_transaction.__enter__ = Mock() + + m_find.side_effect = [ + [backend_group], + [] + ] + + calls_find = [ + call({'_id': ObjectId(common_id)}), + call({"address": "group_1"}) + ] + + m_delete.return_value = None + m_update.return_value = None + + response = client.post(f"/groups/delete/{common_id}") + m_find.assert_has_calls(calls_find) + assert m_delete.call_args == call({'_id': ObjectId(common_id)}) + assert m_update.call_args == call({"address": "group_1"}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Group group_1 was deleted."} + + m_find.side_effect = [ + [backend_group], + [{}] + ] + + response = client.post(f"/groups/delete/{common_id}") + m_find.assert_has_calls(calls_find) + assert m_delete.call_args == call({'_id': ObjectId(common_id)}) + assert m_update.call_args == call({"address": "group_1"}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Group group_1 was deleted. 
It was also deleted from the inventory."} + + +# TEST ADDING DEVICE +ui_group_device_add_new_success = lambda : { + "address": "2.2.2.2", + "port": "", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + +backend_group_add_device_old = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4", "port": 161}, + ] + } + +backend_group_add_device_success_new = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.2.3.4", 'port': 161}, + {"address": "2.2.2.2", "version": "3", "secret": "snmpv3"} + ] + } + +group_inventory = lambda : { + "_id": ObjectId(common_id), + "address": "group_1", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_not_configured_in_inventory_success(m_find, m_update, client): + m_find.side_effect = [ + [backend_group_add_device_old()], + [], + [backend_group_add_device_old()] + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), + call({"address": "group_1", "delete": False}), + call({'_id': ObjectId(common_id)}, {"_id": 0}) + ] + m_update.return_value = None + + response = client.post(f"/devices/add", json=ui_group_device_add_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_add_device_success_new()}) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_configured_in_inventory_success(m_find, m_update, client): + + m_find.side_effect = [ + [backend_group_add_device_old()], # call from group/routes.add_device_to_group + [group_inventory()], # call from HandleNewDevice.add_group_host + [backend_group_add_device_old()], # call from HandleNewDevice.add_group_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [group_inventory()], # call from HandleNewDevice._is_host_in_group + [backend_group_add_device_old()], # call from HandleNewDevice._is_host_in_group + [] # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.add_device_to_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.add_group_host + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from HandleNewDevice.add_group_host + call({'address': "2.2.2.2", 'port': 1161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "2.2.2.2", 'port': 1161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_1": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({"2.2.2.2": {"$exists": True}}) # call from HandleNewDevice.add_single_host + ] + m_update.return_value = None + + response = client.post(f"/devices/add", json=ui_group_device_add_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_add_device_success_new()}) + assert response.json == "success" + + 
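+# The failure tests below follow the same convention as the tests above: m_find.side_effect lists one canned result per Collection.find call, in the exact order the backend handlers are expected to query Mongo (the inline comments name the expected caller). If the handlers issue more find calls than the list provides, the mock raises StopIteration, so each list encodes the full query sequence of the request.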
+@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_not_configured_in_inventory_failed(m_find, m_update, client): + ui_group_device_new = { + "address": "1.2.3.4", + "port": "161", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + + m_find.side_effect = [ + [backend_group_add_device_old()], # call from group/routes.add_device_to_group + [], # call from HandleNewDevice.add_group_host + [backend_group_add_device_old()] # call from HandleNewDevice.add_group_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.add_device_to_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.add_group_host + call({'_id': ObjectId(common_id)}, {"_id": 0}) # call from HandleNewDevice.add_group_host + ] + m_update.return_value = None + + response = client.post(f"/devices/add", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 1.2.3.4:161 already exists in group group_1. Record was not added.'} + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_device_to_group_configured_in_inventory_failed(m_find, m_update, client): + + ui_group_device_new = { + "address": "5.5.5.5", + "port": "161", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + + existing_device_inventory = { + "_id": ObjectId(common_id), + "address": "5.5.5.5", + "port": 161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + + m_find.side_effect = [ + [backend_group_add_device_old()], # call from group/routes.add_device_to_group + [group_inventory()], # call from HandleNewDevice.add_group_host + [backend_group_add_device_old()], # call from HandleNewDevice.add_group_host + [existing_device_inventory], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.add_device_to_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.add_group_host + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from HandleNewDevice.add_group_host + call({'address': "5.5.5.5", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "5.5.5.5", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"5.5.5.5": {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/devices/add", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 5.5.5.5:161 already exists in the inventory.
Record was not added.'} + + +# TEST UPDATING DEVICES +backend_group_update_device_old = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "2.2.2.2"}, + {"address": "3.3.3.3"} + ] + } + +backend_group_update_device_success_new = lambda : { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", + "security_engine": "1112233aabbccdee"}, + {"address": "3.3.3.3"} + ] + } + +ui_group_device_update_new_success = lambda : { + "address": "2.2.2.3", + "port": "1161", + "version": "2c", + "community": "public", + "secret": "", + "securityEngine": "1112233aabbccdee", + "groupId": str(common_id) + } + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_not_configured_in_inventory_success(m_find, m_update, client): + + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()] # call from HandleNewDevice.edit_group_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.update_device_from_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}) # call from HandleNewDevice.edit_group_host + ] + m_update.return_value = None + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_update_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_update_device_success_new()}) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_configured_in_inventory_success(m_find, m_update, client): + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [group_inventory()], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()], # call from HandleNewDevice.edit_group_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [group_inventory()], # call from HandleNewDevice._is_host_in_group + [backend_group_update_device_old()] # call from HandleNewDevice._is_host_in_group + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.update_device_from_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}), # call from HandleNewDevice.edit_group_host + call({'address': "2.2.2.3", 'port': 1161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "2.2.2.3", 'port': 1161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_1": {"$exists": 1}}) # call from HandleNewDevice._is_host_in_group + ] + m_update.return_value = None + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_update_new_success()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_update_device_success_new()}) 
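+    # The asserted update_one call carries the whole group document in $set: the backend replaces the complete device list rather than patching the single edited element in place.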
+ assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_not_configured_in_inventory_failed(m_find, m_update, client): + ui_group_device_new = { + "address": "3.3.3.3", + "port": "", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()] # call from HandleNewDevice.edit_group_host + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.update_device_from_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}) # call from HandleNewDevice.edit_group_host + ] + m_update.return_value = None + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 3.3.3.3: already exists in group group_1. Record was not edited.'} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_device_from_group_configured_in_inventory_failed(m_find, m_update, client): + ui_group_device_new = { + "address": "5.5.5.5", + "port": "161", + "version": "3", + "community": "", + "secret": "snmpv3", + "securityEngine": "", + "groupId": str(common_id) + } + + second_group_inventory = { + "_id": ObjectId(common_id), + "address": "group_2", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + + second_group_inventory_backend = { + "_id": ObjectId(common_id), + "group_2": [ + {"address": "5.5.5.5", "port": 161}, + ] + } + + m_find.side_effect = [ + [backend_group_update_device_old()], # call from group/routes.update_device_from_group + [group_inventory()], # call from HandleNewDevice.edit_group_host + [backend_group_update_device_old()], # call from HandleNewDevice.edit_group_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [second_group_inventory], # call from HandleNewDevice._is_host_in_group + [second_group_inventory_backend] # call from HandleNewDevice._is_host_in_group + ] + calls_find = [ + call({'_id': ObjectId(common_id)}, {"_id": 0}), # call from group/routes.update_device_from_group + call({"address": "group_1", "delete": False}), # call from HandleNewDevice.edit_group_host + call({'_id': ObjectId(common_id)}), # call from HandleNewDevice.edit_group_host + call({'address': "5.5.5.5", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "5.5.5.5", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}) # call from HandleNewDevice._is_host_in_group + ] + + response = client.post(f"/devices/update/{common_id}-1", json=ui_group_device_new) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert response.json == {'message': 'Host 5.5.5.5:161 already exists in group group_2.
Record was not edited.'} + + +# TEST DELETING DEVICE +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_delete_device_from_group_record(m_find, m_update, client): + + backend_group_old = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", + "security_engine": "1112233aabbccdee"}, + {"address": "3.3.3.3"} + ] + } + + backend_group_new1 = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "3.3.3.3"} + ] + } + + backend_group_new2 = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "3.3.3.3"} + ] + } + + m_find.return_value = [backend_group_old] + m_update.return_value = None + response = client.post(f"/devices/delete/{common_id}-1") + + assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new1}) + assert response.json == {'message': 'Device 2.2.2.3:1161 from group group_1 was deleted.'} + + m_find.return_value = [backend_group_new1] + response = client.post(f"/devices/delete/{common_id}-0") + assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new2}) + assert response.json == {'message': 'Device 1.1.1.1: from group group_1 was deleted.'} \ No newline at end of file diff --git a/backend/tests/ui_handling/post_endpoints/test_post_inventory.py b/backend/tests/ui_handling/post_endpoints/test_post_inventory.py new file mode 100644 index 0000000..ea28e2b --- /dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_inventory.py @@ -0,0 +1,1263 @@ +from unittest import mock +from unittest.mock import call, Mock +from bson import ObjectId + + +common_id = "635916b2c8cb7a15f28af40a" + +# TEST ADDING A SINGLE HOST +ui_inventory_new = lambda : { + "inventoryType": "Host", + "address": "11.0.78.114", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +backend_inventory_new = lambda : { + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +ui_inventory_new_host_name = lambda : { + "inventoryType": "Host", + "address": "test", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +backend_inventory_new_host_name = lambda : { + "address": "test", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_single_host_success(m_find, m_insert, m_delete, client): + + # Test adding a new device, when there was no device with the same + # address and port with deleted flag set to True. 
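+    # In this scenario there is nothing to purge, so delete_one must stay uncalled.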
+ m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'address': "11.0.78.114", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "11.0.78.114", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"11.0.78.114": {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + + response = client.post(f"/inventory/add", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new()) + assert not m_delete.called + assert response.json == "success" + + # Test adding a new device when there was a device with the same + # address and port with deleted flag set to True. + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [{ + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + }], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + + response = client.post(f"/inventory/add", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new()) + assert m_delete.call_args == call({"_id": ObjectId(common_id)}) + assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_single_host_name_success(m_find, m_insert, m_delete, client): + # Test adding a new device, when there was no device with the same + # address and port with deleted flag set to True. + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'address': "test", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "test", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"test": {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=ui_inventory_new_host_name()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_host_name()) + assert not m_delete.called + assert response.json == "success" + + # Test adding a new device when there was a device with the same + # address and port with deleted flag set to True. 
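+    # Here a soft-deleted duplicate exists, so delete_one is expected to purge it by its ObjectId before the new record is inserted.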
+ m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [{ + "_id": ObjectId(common_id), + "address": "test", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + }], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=ui_inventory_new_host_name()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_host_name()) + assert m_delete.call_args == call({"_id": ObjectId(common_id)}) + assert response.json == "success" + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_single_host_failure(m_find, m_insert, m_delete, client): + + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [{ + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + }], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice.add_single_host + ] + calls_find = [ + call({'address': "11.0.78.114", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "11.0.78.114", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({'11.0.78.114': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert not m_delete.called + assert not m_insert.called + assert response.json == {"message": "Host 11.0.78.114:161 already exists in the inventory. " + "Record was not added."} + + m_find.side_effect = [ + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [{"test":[]}], # call from HandleNewDevice.add_single_host + ] + response = client.post(f"/inventory/add", json=ui_inventory_new_host_name()) + assert not m_delete.called + assert not m_insert.called + assert response.json == {"message": "There is a group with the same name configured. 
Record test can't be added as a single host."} + + +# TEST UPDATING A SINGLE HOST +backend_inventory_old = lambda : { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 2000, + "security_engine": "1234aabbccd", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_single_host_success(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + # Test editing a device without changing its address and port + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [backend_inventory_old()], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "11.0.78.114", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "11.0.78.114", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.edit_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new()}) + assert not m_insert.called + assert not m_delete.called + assert response.json == "success" + + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_single_host_address_and_port_success(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + # Test editing a device with changing its address and port + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + ui_inventory_new_address_port = { + "inventoryType": "Host", + "address": "1.0.0.0", + "port": "1111", + "version": "3", + "community": "", + "secret": "my_secret_new", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + backend_inventory_new_address_port = { + "address": "1.0.0.0", + "port": 1111, + "version": "3", + "community": "", + "secret": "my_secret_new", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + deleted_host_backend = { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": 
False, + "delete": True + } + + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "1.0.0.0", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.0.0.0", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"_id": ObjectId(common_id)}), + call({'address': "1.0.0.0", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.0.0.0", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({'1.0.0.0': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new_address_port) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_address_port) + assert m_delete.call_args == call({"_id": ObjectId("43EE0BCBA668527E7106E4F5")}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Address or port was edited which resulted in deleting the old device and creating " \ + "the new one at the end of the list."} + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_ip_to_hostname_success(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + ui_inventory_new_address_port = { + "inventoryType": "Host", + "address": "test", + "port": "1111", + "version": "3", + "community": "", + "secret": "my_secret_new", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + backend_inventory_new_address_port = { + "address": "test", + "port": 1111, + "version": "3", + "community": "", + "secret": "my_secret_new", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + deleted_host_backend = { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + } + + m_find.side_effect = 
[ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + [], # call from HandleNewDevice._is_host_configured + [deleted_host_backend], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "test", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "test", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"_id": ObjectId(common_id)}), + call({'address': "test", 'port': 1111, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "test", 'port': 1111, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({'test': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new_address_port) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(backend_inventory_new_address_port) + assert m_delete.call_args == call({"_id": ObjectId("43EE0BCBA668527E7106E4F5")}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert response.json == { + "message": "Address or port was edited which resulted in deleting the old device and creating " \ + "the new one at the end of the list."} + + +backend_inventory_old = lambda : { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 2000, + "security_engine": "1234aabbccd", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + } + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_edit_single_host_failed(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + existing_id = "035916b2c8cb7a15f28af40b" + + ui_inventory_new = { + "inventoryType": "Host", + "address": "0.0.0.0", + "port": "1161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Host" + + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [{ + "_id": ObjectId(existing_id), + "address": "0.0.0.0", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + 
}], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes.update_inventory_record + call({'address': "0.0.0.0", 'port': 1161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "0.0.0.0", 'port': 1161, "delete": True}), # call from HandleNewDevice._is_host_configured + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) + m_find.assert_has_calls(calls_find) + assert response.json == {"message": "Host 0.0.0.0:1161 already exists in the inventory. " + "Record was not edited."} + assert response.status_code == 400 + assert not m_insert.called + assert not m_update.called + assert not m_delete.called + + ui_inventory_new = { + "inventoryType": "Host", + "address": "test", + "port": "1161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_find.side_effect = [ + [backend_inventory_old()], # call from inventory/routes.update_inventory_record + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [backend_inventory_old()], # call from HandleNewDevice.edit_single_host + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_in_group + [{"test":[]}], # call from HandleNewDevice.add_single_host + ] + response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) + assert response.json == {"message": "There is a group with the same name configured. 
Record test can't be added as a single host."} + assert response.status_code == 400 + assert not m_insert.called + assert not m_update.called + assert not m_delete.called + + + + + +# TEST ADDING A GROUP +new_group_ui_inventory = lambda : { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +new_group_backend_inventory = lambda :{ + "address": "group_1", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +new_group_backend = lambda : { + "_id": ObjectId(common_id), + "group_1": [{"address": "1.2.3.4"}] +} + +existing_group_backend = lambda : { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "group_2": [{"address": "0.0.0.0"}] +} + +existing_group_inventory_backend = lambda : { + "_id": ObjectId("43EE0BCBA668527E7106E4F5"), + "address": "group_2", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_success(m_find, m_insert, m_delete, client): + + m_insert.return_value = None + m_delete.return_value = None + + # Test adding a new group, when there was no group with the same name with deleted flag set to True + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "1.2.3.4", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.2.3.4", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'1.2.3.4': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(new_group_backend_inventory()) + assert not m_delete.called + assert response.json == "success" + + # Test adding a new group, when there was a group with the same name with deleted flag set to True + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [{ + "_id": 
ObjectId("83EE0BCBA668527E7106E4F5"), + "address": "group_3", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + }], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert m_insert.call_args == call(new_group_backend_inventory()) + assert m_delete.call_args == call({"_id": ObjectId("83EE0BCBA668527E7106E4F5")}) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_which_exists_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [new_group_backend_inventory()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend()], # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group group_1 has already been added to the inventory. 
" + "Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_with_hosts_configured_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + new_group_ui_failure = { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_group_backend_failure = { + "_id": ObjectId(common_id), + "group_1": [{"address": "0.0.0.0"}] + } + + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend_failure], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "0.0.0.0", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "0.0.0.0", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'0.0.0.0': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_failure) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't add group group_1. " + "Host 0.0.0.0:161 already exists in group group_2. 
Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_with_host_configured_multiple_times_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + new_group_ui_failure = { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_group_backend_failure = { + "_id": ObjectId(common_id), + "group_1": [ + {"address": "1.1.1.1"}, + {"address": "1.1.1.1"} + ] + } + + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend_failure], # call from HandleNewDevice.add_group_to_inventory + + # first iteration in HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + + # second iteration in HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + + # first iteration in HandleNewDevice.add_group_to_inventory + call({'address': "1.1.1.1", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.1.1.1", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'1.1.1.1': {"$exists": True}}), # call from HandleNewDevice.add_single_host + + # second iteration in HandleNewDevice.add_group_to_inventory + call({'address': "1.1.1.1", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "1.1.1.1", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'1.1.1.1': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/add", json=new_group_ui_failure) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't add group group_1. " + "Device 1.1.1.1:161 was configured multiple times in this group. 
" + "Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_without_configuration(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group group_1 doesn't exist in the configuration. " + "Record was not added."} + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_add_group_without_configuration_failure(m_find, m_insert, m_delete, client): + m_insert.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [new_group_backend_inventory()], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_1': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/add", json=new_group_ui_inventory()) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group group_1 doesn't exist in the configuration. 
Record was not added."} + + + +# TEST UPDATING A GROUP + +ui_edited_inventory_group = lambda : { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False +} + +edited_inventory_group = lambda : { + "address": "group_1", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +backend_inventory_existing_edit_group = lambda : { + "_id": ObjectId(common_id), + "address": "group_1", + "port": 161, + "version": "2", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False +} + +backend_existing_edit_group = lambda : { + "_id": ObjectId(common_id), + "group_1": [{"address": "1.1.1.1"}] +} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_without_changing_name_success(m_find, m_insert, m_update, m_delete, client): + + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [backend_inventory_existing_edit_group()], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_1": []}], # call from HandleNewDevice.edit_group_in_inventory + [backend_inventory_existing_edit_group()] # call from HandleNewDevice.edit_group_in_inventory + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_1", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_1", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_1": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + call({"_id": ObjectId(common_id)}) # call from HandleNewDevice.edit_group_in_inventory + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_edited_inventory_group()) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": edited_inventory_group()}) + assert not m_insert.called + assert not m_delete.called + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_with_changing_name_success(m_find, m_insert, m_update, m_delete, client): + + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + new_name_group_ui = { + "inventoryType": "Group", + "address": "group_2", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": 
"1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_name_group = { + "address": "group_2", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + + second_group_backend = { + "_id": ObjectId("19E121BD031284F3CE845B72"), + "group_2": [] + } + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_2": []}], # call from HandleNewDevice.edit_group_in_inventory + [backend_inventory_existing_edit_group()], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [second_group_backend] # call from HandleNewDevice.add_group_to_inventory + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_2", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_2", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_2", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_2", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({"group_2": {"$exists": 1}}) # call from HandleNewDevice.add_group_to_inventory + ] + + response = client.post(f"/inventory/update/{common_id}", json=new_name_group_ui) + m_find.assert_has_calls(calls_find) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert m_insert.call_args == call(new_name_group) + assert not m_delete.called + assert response.json == {"message": "Group name was edited which resulted in deleting the old group and creating new " \ + "one at the end of the list."} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_to_already_configured_failure(m_find, m_insert, m_update, m_delete, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + new_name_group_ui = { + "inventoryType": "Group", + "address": "group_2", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + inventory_existing_other_group = { + "_id": ObjectId("83EE0BCBA668527E7106E4F5"), + "address": "group_2", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": True + } + + m_find.side_effect = [ + 
[backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [inventory_existing_other_group], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_2": []}], # call from HandleNewDevice.edit_group_in_inventory + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_2", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_2", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + ] + + response = client.post(f"/inventory/update/{common_id}", json=new_name_group_ui) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Group with name group_2 already exists. Record was not edited."} + +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_to_other_group_with_host_already_configured_failure(m_find, m_insert, m_update, m_delete, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + + new_group_ui_failure = { + "inventoryType": "Group", + "address": "group_3", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + new_group_backend_failure = { + "_id": ObjectId(common_id), + "group_3": [{"address": "0.0.0.0"}] + } + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + [backend_existing_edit_group()], # call from inventory/routes/get_inventory_type + [], # call from HandleNewDevice.edit_group_in_inventory + [], # call from HandleNewDevice.edit_group_in_inventory + [{"group_3": []}], # call from HandleNewDevice.edit_group_in_inventory + [backend_inventory_existing_edit_group()], # call from HandleNewDevice.edit_group_in_inventory + + [], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice.add_group_to_inventory + [new_group_backend_failure], # call from HandleNewDevice.add_group_to_inventory + [], # call from HandleNewDevice._is_host_configured + [], # call from HandleNewDevice._is_host_configured + [existing_group_inventory_backend()], # call from HandleNewDevice._is_host_in_group + [existing_group_backend()], # call from HandleNewDevice._is_host_in_group + [], # call from HandleNewDevice.add_single_host + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + call({"group_1": {"$exists": 1}}), # call from inventory/routes/get_inventory_type + call({'address': "group_3", "delete": False}), # call from HandleNewDevice.edit_group_in_inventory + call({'address': "group_3", "delete": True}), # call from HandleNewDevice.edit_group_in_inventory + call({"group_3": {"$exists": 1}}), # call from HandleNewDevice.edit_group_in_inventory + 
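# Expected sequence (assumed from the per-call comments): edit_group_in_inventory finishes with the _id lookup below, then add_group_to_inventory re-validates group_3 and should fail because host 0.0.0.0:161 already belongs to group_2. +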
call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.edit_group_in_inventory + + call({'address': "group_3", "delete": False}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "group_3", "delete": True}), # call from HandleNewDevice.add_group_to_inventory + call({'group_3': {"$exists": 1}}), # call from HandleNewDevice.add_group_to_inventory + call({'address': "0.0.0.0", 'port': 161, "delete": False}), # call from HandleNewDevice._is_host_configured + call({'address': "0.0.0.0", 'port': 161, "delete": True}), # call from HandleNewDevice._is_host_configured + call({"address": {"$regex": "^[a-zA-Z].*"}, "delete": False}), # call from HandleNewDevice._is_host_in_group + call({"group_2": {"$exists": 1}}), # call from HandleNewDevice._is_host_in_group + call({'0.0.0.0': {"$exists": True}}), # call from HandleNewDevice.add_single_host + ] + + response = client.post(f"/inventory/update/{common_id}", json=new_group_ui_failure) + m_find.assert_has_calls(calls_find) + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't add group group_3. " + "Host 0.0.0.0:161 already exists in group group_2. Record was not added."} + + +@mock.patch("SC4SNMP_UI_backend.inventory.routes.get_inventory_type") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.insert_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_group_host_or_host_to_group_failure(m_find, m_insert, m_update, m_delete, m_get_inventory_type, client): + m_insert.return_value = None + m_update.return_value = None + m_delete.return_value = None + m_get_inventory_type.return_value = "Group" + + ui_edit_group_to_host = { + "inventoryType": "Host", + "address": "1.1.1.1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_find.side_effect = [ + [backend_inventory_existing_edit_group()], # call from inventory/routes/update_inventory_record + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from inventory/routes/update_inventory_record + ] + + response = client.post(f"/inventory/update/{common_id}", json=ui_edit_group_to_host) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't edit single host to the group or group to the single host"} + + m_get_inventory_type.return_value = "Host" + backend_edit_host_to_group = { + "_id": ObjectId(common_id), + "address": "1.1.1.1", + "port": 161, + "version": "2", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1;prof2;prof3", + "smart_profiles": False, + "delete": False + } + + ui_edit_group_to_host2 = { + "inventoryType": "Group", + "address": "group_1", + "port": "161", + "version": "3", + "community": "", + "secret": "my_secret", + "walkInterval": 1800, + "securityEngine": "1234aabbccd", + "profiles": ["prof1", "prof2", "prof3"], + "smartProfiles": False + } + + m_find.side_effect = [ + [backend_edit_host_to_group], # call from HandleNewDevice.update_inventory_record + ] + + calls_find = [ + call({"_id": ObjectId(common_id)}), # call from HandleNewDevice.update_inventory_record + ] + + response = client.post(f"/inventory/update/{common_id}", 
json=ui_edit_group_to_host2) + m_find.assert_has_calls(calls_find) + assert not m_update.called + assert not m_insert.called + assert not m_delete.called + assert response.json == {"message": "Can't edit single host to the group or group to the single host"} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_delete_inventory_record(m_find, m_update, client): + m_update.return_value = None + m_find.return_value = [{ + "_id": ObjectId(common_id), + "address": "group_1", + "port": 1161, + "version": "2c", + "community": "public", + "secret": "", + "walk_interval": 1800, + "security_engine": "", + "profiles": "prof1", + "smart_profiles": False, + "delete": False + }] + response = client.post(f"/inventory/delete/{common_id}") + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) + assert response.json == {"message": f"group_1 was deleted."} diff --git a/backend/tests/ui_handling/post_endpoints/test_post_profiles.py b/backend/tests/ui_handling/post_endpoints/test_post_profiles.py new file mode 100644 index 0000000..e2d1388 --- /dev/null +++ b/backend/tests/ui_handling/post_endpoints/test_post_profiles.py @@ -0,0 +1,274 @@ +from unittest import mock +from unittest.mock import call +from bson import ObjectId + + +# TEST ADDING PROFILE +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.insert_one") +def test_add_profile_record_success(m_insert, m_find, client): + m_insert.return_value = None + m_find.return_value = [] + ui_prof = { + "profileName": "profile_1", + "frequency": 10, + "conditions": { + "condition": "standard", + "field": "", + "patterns": None + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}, + {"component": "IF-MIB", "object": "", "index": ""}, + {"component": "IF-MIB", "object": "ifOutErrors", "index": ""}] + } + backend_prof = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + response = client.post("/profiles/add", json=ui_prof) + assert m_find.call_args == call({"profile_1": {"$exists": True}}) + assert m_insert.call_args == call(backend_prof) + assert response.json == "success" + + +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.insert_one") +def test_add_profile_record_failure(m_insert, m_find, client): + ui_prof = { + "profileName": "profile_1", + "frequency": 10, + "conditions": { + "condition": "None", + "field": "", + "patterns": None + }, + "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, + {"family": "IF-MIB", "category": "", "index": ""}, + {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] + } + backend_prof = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + m_insert.return_value = None + m_find.return_value = [backend_prof] + + response = client.post("/profiles/add", json=ui_prof) + assert m_find.call_args == call({"profile_1": {"$exists": True}}) + assert not m_insert.called + assert response.json == {"message": f"Profile with name profile_1 already exists. 
Profile was not added."} + + +# TEST DELETING PROFILE +@mock.patch("pymongo.collection.Collection.find") +@mock.patch("pymongo.collection.Collection.delete_one") +@mock.patch("pymongo.collection.Collection.update_one") +def test_delete_profile_record(m_update, m_delete, m_find, client): + common_id = "635916b2c8cb7a15f28af40a" + profile = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + backend_inventory = { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_1;profile_2", + "smart_profiles": False, + "delete": False + } + + backend_inventory_update = { + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_2", + "smart_profiles": False, + "delete": False + } + + m_find.side_effect = [ + [profile], + [backend_inventory] + ] + m_delete.return_value = None + m_update.return_value = None + + response = client.post(f'/profiles/delete/{common_id}') + + calls = [call({'_id': ObjectId(common_id)}, {"_id": 0}), call({"profiles": {"$regex": '.*profile_1.*'}, "delete": False})] + m_find.assert_has_calls(calls) + assert m_delete.call_args == call({"_id": ObjectId(common_id)}) + assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}) + assert response.json == {"message": f"Profile profile_1 was deleted. It was also deleted from some inventory records."} + + +# TEST UPDATING PROFILE +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_profile_record_no_name_change_success(m_find, m_update, client): + common_id = "635916b2c8cb7a15f28af40a" + ui_prof_1_new = { + "profileName": "profile_1", + "frequency": 20, + "conditions": { + "condition": "smart", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}] + } + + backend_prof_1_old = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", "1"], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + backend_prof_1_new = { + "profile_1": { + "frequency": 20, + "condition": {"type": "field", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, + "varBinds": [["IF-MIB", "ifInDiscards", "1"]] + } + } + + m_find.side_effect = [[], [backend_prof_1_old]] + m_update.return_value = None + + response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) + assert m_update.call_args == call({'_id': ObjectId(common_id)}, + {"$set": {"profile_1": backend_prof_1_new["profile_1"]}}) + assert response.json == "success" + assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) + + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_profile_record_with_name_change_success(m_find, m_update, client): + common_id = "635916b2c8cb7a15f28af40a" + + backend_prof_1_old = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + ui_prof_1_new = { + "profileName": "profile_1_edit", + "frequency": 20, + "conditions": { + "condition": 
"smart", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + }, + "varBinds": [{"component": "IF-MIB", "object": "ifInDiscards", "index": "1"}] + } + + backend_prof_1_new = { + "profile_1_edit": { + "frequency": 20, + "condition": {"type": "field", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, + "varBinds": [["IF-MIB", "ifInDiscards", "1"]] + } + } + + backend_inventory = { + "_id": ObjectId(common_id), + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_1;profile_2", + "smart_profiles": False, + "delete": False + } + + backend_inventory_update = { + "address": "11.0.78.114", + "port": 161, + "version": "3", + "community": "", + "secret": "my_secret", + "walk_interval": 1800, + "security_engine": "1234aabbccd", + "profiles": "profile_1_edit;profile_2", + "smart_profiles": False, + "delete": False + } + + m_find.side_effect = [ + [], + [backend_prof_1_old], + [backend_inventory] + ] + m_update.return_value = None + + calls_find = [call({f"profile_1_edit": {"$exists": True}, "_id": {"$ne": ObjectId(common_id)}}), + call({'_id': ObjectId(common_id)}, {"_id": 0}), + call({"profiles": {"$regex": '.*profile_1.*'}, "delete": False})] + + calls_update = [call({'_id': ObjectId(common_id)}, {"$rename": {"profile_1": "profile_1_edit"}}), + call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}), + call({'_id': ObjectId(common_id)}, + {"$set": {"profile_1_edit": backend_prof_1_new["profile_1_edit"]}})] + + response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) + + m_find.assert_has_calls(calls_find) + m_update.assert_has_calls(calls_update) + assert response.json == {"message": f"If profile_1 was used in some records in the inventory," + f" it was updated to profile_1_edit"} + +@mock.patch("pymongo.collection.Collection.update_one") +@mock.patch("pymongo.collection.Collection.find") +def test_update_profile_record_failure(m_find, m_update, client): + common_id = "635916b2c8cb7a15f28af40a" + ui_prof_1_new = { + "profileName": "profile_1", + "frequency": 20, + "conditions": { + "condition": "field", + "field": "SNMPv2-MIB.sysObjectID", + "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] + }, + "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}] + } + + backend_prof_1_old = { + "profile_1": { + "frequency": 10, + "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] + } + } + + m_find.return_value = [backend_prof_1_old] + response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) + assert not m_update.called + assert response.json == {"message": "Profile with name profile_1 already exists. 
Profile was not edited."} \ No newline at end of file diff --git a/backend/tests/ui_handling/test_post_endpoints.py b/backend/tests/ui_handling/test_post_endpoints.py deleted file mode 100644 index d8157ca..0000000 --- a/backend/tests/ui_handling/test_post_endpoints.py +++ /dev/null @@ -1,998 +0,0 @@ -from unittest import mock -from unittest.mock import call, Mock -from bson import ObjectId - - -@mock.patch("pymongo.collection.Collection.insert_one") -def test_add_profile_record(m_client, client): - m_client.return_value = None - ui_prof = { - "profileName": "profile_1", - "frequency": 10, - "conditions": { - "condition": "None", - "field": "", - "patterns": None - }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}, - {"family": "IF-MIB", "category": "", "index": ""}, - {"family": "IF-MIB", "category": "ifOutErrors", "index": ""}] - } - - backend_prof = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - response = client.post("/profiles/add", json=ui_prof) - - assert m_client.call_args == call(backend_prof) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.find") -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -def test_delete_profile_record(m_update, m_delete, m_find, client): - common_id = "635916b2c8cb7a15f28af40a" - profile = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - backend_inventory = { - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_1;profile_2", - "smart_profiles": False, - "delete": False - } - - backend_inventory_update = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_2", - "smart_profiles": False, - "delete": False - } - - m_find.side_effect = [ - [profile], - [backend_inventory] - ] - m_delete.return_value = None - m_update.return_value = None - - response = client.post(f'/profiles/delete/{common_id}') - - calls = [call({'_id': ObjectId(common_id)}, {"_id": 0}), call({"profiles": {"$regex": '.*profile_1.*'}})] - m_find.assert_has_calls(calls) - assert m_delete.call_args == call({"_id": ObjectId(common_id)}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}) - assert response.json == {"message": f"If profile_1 was used in some records in the inventory," - f" those records were updated"} - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_profile_record_no_name_change(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - ui_prof_1_new = { - "profileName": "profile_1", - "frequency": 20, - "conditions": { - "condition": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] - }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}] - } - - backend_prof_1_old = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - backend_prof_1_new = { - "profile_1": { - "frequency": 
20, - "condition": {"type": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, - "varBinds": [["IF-MIB", "ifInDiscards", 1]] - } - } - - m_find.return_value = [backend_prof_1_old] - m_update.return_value = None - - response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) - assert m_update.call_args == call({'_id': ObjectId(common_id)}, - {"$set": {"profile_1": backend_prof_1_new["profile_1"]}}) - assert response.json == "success" - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_profile_record_with_name_change(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - backend_prof_1_old = { - "profile_1": { - "frequency": 10, - "varBinds": [["IF-MIB", "ifInDiscards", 1], ["IF-MIB"], ["IF-MIB", "ifOutErrors"]] - } - } - - ui_prof_1_new = { - "profileName": "profile_1_edit", - "frequency": 20, - "conditions": { - "condition": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": [{"pattern": "^MIKROTIK"}, {"pattern": "^MIKROTIK2"}] - }, - "varBinds": [{"family": "IF-MIB", "category": "ifInDiscards", "index": "1"}] - } - - backend_prof_1_new = { - "profile_1_edit": { - "frequency": 20, - "condition": {"type": "field", - "field": "SNMPv2-MIB.sysObjectID", - "patterns": ["^MIKROTIK", "^MIKROTIK2"]}, - "varBinds": [["IF-MIB", "ifInDiscards", 1]] - } - } - - backend_inventory = { - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_1;profile_2", - "smart_profiles": False, - "delete": False - } - - backend_inventory_update = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "profile_1_edit;profile_2", - "smart_profiles": False, - "delete": False - } - - m_find.side_effect = [ - [backend_prof_1_old], - [backend_inventory] - ] - m_update.return_value = None - - calls_find = [call({'_id': ObjectId(common_id)}, {"_id": 0}), - call({"profiles": {"$regex": '.*profile_1.*'}})] - - calls_update = [call({'_id': ObjectId(common_id)}, {"$rename": {"profile_1": "profile_1_edit"}}), - call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_update}), - call({'_id': ObjectId(common_id)}, - {"$set": {"profile_1_edit": backend_prof_1_new["profile_1_edit"]}})] - - response = client.post(f"/profiles/update/{common_id}", json=ui_prof_1_new) - - m_find.assert_has_calls(calls_find) - m_update.assert_has_calls(calls_update) - assert response.json == {"message": f"If profile_1 was used in some records in the inventory," - f" it was updated to profile_1_edit"} - - -@mock.patch("pymongo.collection.Collection.insert_one") -def test_add_group_record(m_insert, client): - ui_group = { - "groupName": "group_1" - } - - backend_group = { - "group_1": [] - } - - response = client.post(f"/groups/add", json=ui_group) - m_insert.return_value = None - assert m_insert.call_args == call(backend_group) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_group(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_group_new = { - "_id": common_id, - "groupName": "group_1_edit" - } - 
- backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - ] - } - - calls_update = [ - call({'_id': ObjectId(common_id)}, {"$rename": {"group_1": "group_1_edit"}}), - call({"address": "group_1"}, {"$set": {"address": 'group_1_edit'}}) - ] - - m_find.side_effect = [ - [], - [backend_group_old] - ] - - response = client.post(f"/groups/update/{common_id}", json=ui_group_new) - m_update.assert_has_calls(calls_update) - assert response.json == {"message": "group_1 was also renamed to group_1_edit in the inventory"} - - -@mock.patch("pymongo.collection.Collection.find") -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.MongoClient.start_session") -def test_delete_group_and_devices(m_session, m_update, m_delete, m_find, client): - common_id = "635916b2c8cb7a15f28af40a" - backend_group = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - ] - } - m_session.return_value.__enter__.return_value.start_transaction.__enter__ = Mock() - - m_find.return_value = [backend_group] - m_delete.return_value = None - m_update.return_value = None - - response = client.post(f"/groups/delete/{common_id}") - assert m_find.call_args == call({'_id': ObjectId(common_id)}) - assert m_delete.call_args == call({'_id': ObjectId(common_id)}) - assert m_update.call_args == call({"address": "group_1"}, {"$set": {"delete": True}}) - assert response.json == {"message": "If group_1 was configured in the inventory, it was deleted from there"} - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_device_to_group(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_group_device_new = { - "address": "2.2.2.2", - "port": "", - "version": "3", - "community": "", - "secret": "snmpv3", - "securityEngine": "", - "groupId": str(common_id) - } - - backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - ] - } - - backend_group_new = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.2.3.4"}, - {"address": "2.2.2.2", "version": "3", "secret": "snmpv3"} - ] - } - - m_find.return_value = [backend_group_old] - m_update.return_value = None - - response = client.post(f"/devices/add", json=ui_group_device_new) - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_update_device_from_group(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_group_device_update = { - "address": "2.2.2.3", - "port": "1161", - "version": "2c", - "community": "public", - "secret": "", - "securityEngine": "1112233aabbccdee", - "groupId": str(common_id) - } - - backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "2.2.2.2"}, - {"address": "3.3.3.3"} - ] - } - - backend_group_new = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", - "security_engine": "1112233aabbccdee"}, - {"address": "3.3.3.3"} - ] - } - - m_find.return_value = [backend_group_old] - m_update.return_value = None - - response = client.post(f"/devices/update/{common_id}-1", 
json=ui_group_device_update) - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.find") -def test_delete_device_from_group_record(m_find, m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - - backend_group_old = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "2.2.2.3", "port": 1161, "version": "2c", "community": "public", - "security_engine": "1112233aabbccdee"}, - {"address": "3.3.3.3"} - ] - } - - backend_group_new1 = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "1.1.1.1"}, - {"address": "3.3.3.3"} - ] - } - - backend_group_new2 = { - "_id": ObjectId(common_id), - "group_1": [ - {"address": "3.3.3.3"} - ] - } - - m_find.return_value = [backend_group_old] - m_update.return_value = None - response = client.post(f"/devices/delete/{common_id}-1") - - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new1}) - assert response.json == "success" - - m_find.return_value = [backend_group_new1] - response = client.post(f"/devices/delete/{common_id}-0") - assert m_find.call_args == call({'_id': ObjectId(common_id)}, {"_id": 0}) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_group_new2}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_single_host_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - calls_find = [ - call({'address': "11.0.78.114", 'port': 161, "delete": False}), - call({'address': "11.0.78.114", 'port': 161, "delete": True}) - ] - - m_find.side_effect = [[], []] - # Test adding a new device, when there was no device with the same address and port with deleted flag set to True - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert not m_delete.called - assert response.json == "success" - - m_find.side_effect = [ - [], - [{ - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": True - }] - ] - - # Test adding a new device, when there was a device with 
the same address and port with deleted flag set to True - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert m_delete.call_args == call({"_id": ObjectId(common_id)}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_single_host_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - calls_find = [ - call({'address': "11.0.78.114", 'port': 161, "delete": False}), - call({'address': "11.0.78.114", 'port': 161, "delete": True}) - ] - - m_find.side_effect = [[], []] - - # Test editing a device with changing its address and port - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - m_find.side_effect = [ - [{ - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": True - }], - [] - ] - - # Test editing a device without changing its address and port - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_single_host_failed(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [{ - "_id": ObjectId(common_id), - "address": "11.0.78.114", - "port": 1161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": 
"prof1", - "smart_profiles": False, - "delete": False - }], - [] - ] - - calls_find = [ - call({'address': "11.0.78.114", 'port': 1161, "delete": False}), - call({'address': "11.0.78.114", 'port': 1161, "delete": True}) - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for 11.0.78.114:1161 already exists. Record was not added."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_single_host_failed(m_find, m_insert, m_update, m_delete, client): - edit_id = "635916b2c8cb7a15f28af40a" - existing_id = "035916b2c8cb7a15f28af40b" - - ui_inventory_new = { - "address": "11.0.78.114", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [{ - "_id": ObjectId(existing_id), - "address": "11.0.78.114", - "port": 1161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": False - }], - [] - ] - - calls_find = [ - call({'address': "11.0.78.114", 'port': 1161, "delete": False}), - call({'address': "11.0.78.114", 'port': 1161, "delete": True}) - ] - - response = client.post(f"/inventory/update/{edit_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for 11.0.78.114:1161 already exists. 
Record was not edited."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_group_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "group_1", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "group_1", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [], - [], - [{ - "_id": ObjectId(common_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}), - call({'group_1': {"$exists": 1}}) - ] - - # Test adding a new group, when there was no group with the same name with deleted flag set to True - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert not m_delete.called - assert response.json == "success" - - # Test adding a new group, when there was a group with the same name with deleted flag set to True - m_find.side_effect = [ - [], - [{ - "_id": ObjectId(common_id), - "address": "group_1", - "port": 1161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": True - }], - [{ - "_id": ObjectId(common_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_insert.call_args == call(backend_inventory_new) - assert not m_update.called - assert m_delete.call_args == call({"_id": ObjectId(common_id)}) - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_group_success(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "group_1", - "port": "161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - backend_inventory_new = { - "address": "group_1", - "port": 161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "1234aabbccd", - "profiles": "prof1;prof2;prof3", - "smart_profiles": False, - "delete": False - } - - m_insert.return_value = None - m_update.return_value = None - 
m_delete.return_value = None - - m_find.side_effect = [[], [], [{"_id": ObjectId(common_id), "group_1": [{"address": "1.2.3.4"}]}]] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}), - call({'group_1': {"$exists": 1}}) - ] - - # Test editing a group with changing group name - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - m_find.side_effect = [[{ - "_id": ObjectId(common_id), - "address": "group_1", - "port": 1161, - "version": "3", - "community": "", - "secret": "my_secret", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": True - }], - [], - [{"_id": ObjectId(common_id), "group_1": [{"address": "1.2.3.4"}]}]] - - # Test editing a group without changing group name - response = client.post(f"/inventory/update/{common_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": backend_inventory_new}) - assert not m_insert.called - assert not m_delete.called - assert response.json == "success" - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_add_inventory_record_group_failed(m_find, m_insert, m_update, m_delete, client): - common_id = "635916b2c8cb7a15f28af40a" - - ui_inventory_new = { - "address": "group_1", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - # Test adding a new group, when the same group is already in the inventory. - m_find.side_effect = [ - [{ - "_id": ObjectId(common_id), - "address": "group_1", - "port": 161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": False - }], - [], - [{ - "_id": ObjectId(common_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}), - call({'group_1': {"$exists": 1}}) - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for group_1 already exists. Record was not added."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - # Test adding a new group, when there is no group configured. - m_find.side_effect = [ - [], - [], - [] - ] - - response = client.post(f"/inventory/add", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"There is no group group_1 configured. 
Record was not added."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.delete_one") -@mock.patch("pymongo.collection.Collection.update_one") -@mock.patch("pymongo.collection.Collection.insert_one") -@mock.patch("pymongo.collection.Collection.find") -def test_edit_inventory_record_group_failed(m_find, m_insert, m_update, m_delete, client): - edit_id = "635916b2c8cb7a15f28af40a" - existing_id = "035916b2c8cb7a15f28af40b" - - ui_inventory_new = { - "address": "group_1", - "port": "1161", - "version": "3", - "community": "", - "secret": "my_secret", - "walkInterval": 1800, - "securityEngine": "1234aabbccd", - "profiles": ["prof1", "prof2", "prof3"], - "smartProfiles": False - } - - m_insert.return_value = None - m_update.return_value = None - m_delete.return_value = None - - m_find.side_effect = [ - [{ - "_id": ObjectId(existing_id), - "address": "group_1", - "port": 1161, - "version": "2c", - "community": "public", - "secret": "", - "walk_interval": 1800, - "security_engine": "", - "profiles": "prof1", - "smart_profiles": False, - "delete": False - }], - [], - [{ - "_id": ObjectId(existing_id), - "group_1": [{"address": "1.2.3.4"}] - }] - ] - - calls_find = [ - call({'address': "group_1", "delete": False}), - call({'address': "group_1", "delete": True}) - ] - - response = client.post(f"/inventory/update/{edit_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"Inventory record for group_1 already exists. Record was not edited."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - m_find.side_effect = [ - [], - [], - [] - ] - - # Test editing a group, when there is no group configured. - response = client.post(f"/inventory/update/{edit_id}", json=ui_inventory_new) - m_find.assert_has_calls(calls_find) - assert response.json == {"message": f"There is no group group_1 configured. 
Record was not edited."} - assert response.status_code == 400 - assert not m_insert.called - assert not m_update.called - assert not m_delete.called - - -@mock.patch("pymongo.collection.Collection.update_one") -def test_delete_inventory_record(m_update, client): - common_id = "635916b2c8cb7a15f28af40a" - m_update.return_value = None - response = client.post(f"/inventory/delete/{common_id}") - assert m_update.call_args == call({"_id": ObjectId(common_id)}, {"$set": {"delete": True}}) - assert response.json == "success" diff --git a/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml b/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml new file mode 100644 index 0000000..6e18d10 --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/poller_inventory.yaml @@ -0,0 +1,4 @@ +inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;in_profile,t,f + group1,1161,2c,public,,,1800,single_metric;multiple_conditions,f,f diff --git a/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml b/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml new file mode 100644 index 0000000..4017b3c --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/scheduler_groups.yaml @@ -0,0 +1,12 @@ +group1: +- address: 52.14.243.157 + port: 1163 +- address: 20.14.10.0 + port: 161 +group2: +- address: 0.10.20.30 +- address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + security_engine: aabbccdd1234 diff --git a/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml b/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml new file mode 100644 index 0000000..151a2ad --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/scheduler_profiles.yaml @@ -0,0 +1,51 @@ +single_metric: + frequency: 60 + varBinds: + - ['IF-MIB', 'ifMtu', '1'] +small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] +gt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + - ['IF-MIB', 'ifOutErrors'] + - ['IF-MIB', 'ifOutOctets'] \ No newline at end of file diff --git a/backend/tests/yamls_for_tests/reference_files/values.yaml b/backend/tests/yamls_for_tests/reference_files/values.yaml new file mode 100644 index 0000000..8bc3a57 --- /dev/null +++ b/backend/tests/yamls_for_tests/reference_files/values.yaml @@ -0,0 +1,161 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: 
mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + group1: + - address: 52.14.243.157 + port: 1163 + - address: 20.14.10.0 + port: 161 + group2: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + security_engine: aabbccdd1234 + profiles: | + single_metric: + frequency: 60 + varBinds: + - ['IF-MIB', 'ifMtu', '1'] + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + gt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + multiple_conditions: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + - ['IF-MIB', 'ifOutErrors'] + - ['IF-MIB', 'ifOutOctets'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;in_profile,t,f + group1,1161,2c,public,,,1800,single_metric;multiple_conditions,f,f +# group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/backend/tests/yamls_for_tests/values_test/.gitignore b/backend/tests/yamls_for_tests/values_test/.gitignore new file mode 100644 index 0000000..0372f75 --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/.gitignore @@ -0,0 +1 @@ +sc4snmp_ui_*.yaml 
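The reference YAMLs above pin down the data shapes the backend tests assert on: a UI inventory record with camelCase keys and a profiles list is flattened into a backend document with snake_case keys, an integer port, a ';'-joined profiles string and a soft-delete flag, which then serializes into one CSV row of the poller inventory (booleans written as 't'/'f'). Below is a minimal sketch of that mapping, for orientation only; the helpers ui_to_backend and backend_to_csv_row are invented names for this sketch, not code shipped in this PR.

# Illustrative sketch (assumed mapping, reconstructed from the test fixtures above).
def ui_to_backend(ui_record: dict) -> dict:
    # camelCase UI keys become snake_case backend keys; the port string is cast
    # to int, the profiles list is joined with ';', and records are soft-deleted
    # via a boolean flag rather than removed.
    return {
        "address": ui_record["address"],
        "port": int(ui_record["port"]),
        "version": ui_record["version"],
        "community": ui_record["community"],
        "secret": ui_record["secret"],
        "walk_interval": ui_record["walkInterval"],
        "security_engine": ui_record["securityEngine"],
        "profiles": ";".join(ui_record["profiles"]),
        "smart_profiles": ui_record["smartProfiles"],
        "delete": False,
    }

def backend_to_csv_row(record: dict) -> str:
    # Column order and the 't'/'f' booleans follow the header used in
    # poller_inventory.yaml:
    # address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
    def flag(value: bool) -> str:
        return "t" if value else "f"
    return ",".join([
        record["address"], str(record["port"]), record["version"],
        record["community"], record["secret"], record["security_engine"],
        str(record["walk_interval"]), record["profiles"],
        flag(record["smart_profiles"]), flag(record["delete"]),
    ])

Applied to the ui_inventory_new fixture from test_add_inventory_record_group_success, ui_to_backend reproduces its backend_inventory_new counterpart exactly. The soft-delete flag is what test_delete_inventory_record checks: /inventory/delete/<id> only issues update_one({"_id": ...}, {"$set": {"delete": True}}), while the add/update endpoints first probe find(...) with "delete": False and "delete": True to decide whether to insert, replace, or reject a record.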
diff --git a/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml b/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml new file mode 100644 index 0000000..fc5bebf --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/values-before-edit.yaml @@ -0,0 +1,139 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + some_group: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + - address: 10.1.3.157 + port: 1165 + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + conditional_profile_greater_than: + frequency: 100 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;conditional_profile_greater_than,t,f + some_group,1161,2c,public,,,1800,single_metric;in_profile,f,t + 156.0.10.91,161,2c,public,,,1800,conditional_profile_greater_than,t,t +# group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + 
signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/backend/tests/yamls_for_tests/values_test/values.yaml b/backend/tests/yamls_for_tests/values_test/values.yaml new file mode 100644 index 0000000..6b88a85 --- /dev/null +++ b/backend/tests/yamls_for_tests/values_test/values.yaml @@ -0,0 +1,139 @@ +UI: + enable: true + frontEnd: + NodePort: 30001 + backEnd: + NodePort: 30002 + valuesFileDirectory: /home/ubuntu/values_folder + valuesFileName: values.yaml + keepSectionFiles: true +image: + pullPolicy: Always +splunk: + enabled: true + protocol: https + host: 0.0.0.0 + token: 00000000-0000-0000-0000-000000000000 + insecureSSL: 'true' + port: '8088' + + sourcetypeTraps: mytype:trap + # sourcetype for non-metric polling event + sourcetypePollingEvents: mytype:polling + # sourcetype for metric polling event + sourcetypePollingMetrics: mytype:metric +traps: + #service: + # type: NodePort + # externalTrafficPolicy: Cluster + # nodePort: 30000 + communities: + 2c: + - public + - homelab + #usernameSecrets: + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + + #loadBalancerIP: The IP address in the metallb pool + loadBalancerIP: 0.0.0.0 +worker: + # There are 3 types of workers + trap: + # replicaCount: number of trap-worker pods which consumes trap tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + poller: + # replicaCount: number of poller-worker pods which consumes polling tasks + replicaCount: 1 + #autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + sender: + # replicaCount: number of sender-worker pods which consumes sending tasks + replicaCount: 1 + # autoscaling: use it instead of replicaCount in order to make pods scalable by itself + #autoscaling: + # enabled: true + # minReplicas: 2 + # maxReplicas: 10 + # targetCPUUtilizationPercentage: 80 + # udpConnectionTimeout: timeout in seconds for SNMP operations + #udpConnectionTimeout: 5 + logLevel: DEBUG +scheduler: + logLevel: DEBUG + groups: | + some_group: + - address: 0.10.20.30 + - address: 52.14.243.157 + port: 1165 + version: '3' + secret: 'mysecret' + - address: 10.1.3.157 + port: 1165 + profiles: | + small_walk: + condition: + type: walk + varBinds: + - ['IP-MIB'] + - ['IF-MIB'] + conditional_profile_greater_than: + frequency: 100 + conditions: + - field: IF-MIB.ifIndex + operation: "gt" + value: 1 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + lt_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifIndex + operation: "lt" + value: 2 + varBinds: + - ['IF-MIB', 'ifOutDiscards'] + in_profile: + frequency: 10 + conditions: + - field: IF-MIB.ifDescr + operation: "in" + value: + - "eth0" + - "test value" + varBinds: + - ['IF-MIB', 'ifOutDiscards'] +poller: + metricsIndexingEnabled: true + usernameSecrets: + - testv3 + # - sc4snmp-hlab-sha-aes + # - sc4snmp-hlab-sha-des + inventory: |- + address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete + 1.1.1.1,161,2c,public,,,1800,small_walk;conditional_profile_greater_than,t,f + some_group,1161,2c,public,,,1800,single_metric;in_profile,f,t + 156.0.10.91,161,2c,public,,,1800,conditional_profile_greater_than,t,t +# 
group2,163,2c,public,,,3000,generic_switch,, + # 10.0.0.100,,3,,sc4snmp-hlab-sha-des,,1800,,, +sim: + enabled: false + signalfxToken: + signalfxRealm: us1 +mongodb: + pdb: + create: true + persistence: + storageClass: microk8s-hostpath + volumePermissions: + enabled: true diff --git a/frontend/Dockerfile b/frontend/Dockerfile index 03c6477..735ce7b 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -3,6 +3,7 @@ WORKDIR /frontend ENV PATH /frontend/node_modules/.bin:$PATH COPY package.json yarn.lock lerna.json ./ COPY ./packages ./packages +RUN apk add --update python3 make g++ && rm -rf /var/cache/apk/* RUN yarn install RUN yarn run build RUN apk add curl diff --git a/frontend/lerna.json b/frontend/lerna.json index 0159799..21878c4 100644 --- a/frontend/lerna.json +++ b/frontend/lerna.json @@ -1,6 +1,6 @@ { "lerna": "^6.6.2", - "version": "0.0.1", + "version": "1.0.0-beta.18", "command": { "publish": { "ignoreChanges": ["*.md"] diff --git a/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html b/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html index 1ff433a..1e49b85 100644 --- a/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html +++ b/frontend/packages/manager/demo/splunk-app/appserver/templates/demo.html @@ -5,7 +5,7 @@ - Manager Demo App + SC4SNMP Manager diff --git a/frontend/packages/manager/demo/standalone/index.html b/frontend/packages/manager/demo/standalone/index.html index 8912646..dc7ac45 100644 --- a/frontend/packages/manager/demo/standalone/index.html +++ b/frontend/packages/manager/demo/standalone/index.html @@ -3,7 +3,7 @@ - Manager + SC4SNMP Manager diff --git a/frontend/packages/manager/jest.config.js b/frontend/packages/manager/jest.config.js index af03d11..a040538 100644 --- a/frontend/packages/manager/jest.config.js +++ b/frontend/packages/manager/jest.config.js @@ -1,3 +1,7 @@ module.exports = { - testMatch: ['**/*.unit.[jt]s?(x)'], + testMatch: ['**/*.test.[jt]s?(x)'], + testEnvironment: "jsdom", + setupFilesAfterEnv: [ + "@testing-library/jest-dom/extend-expect" + ] }; diff --git a/frontend/packages/manager/package.json b/frontend/packages/manager/package.json index 98dc49b..bf67755 100644 --- a/frontend/packages/manager/package.json +++ b/frontend/packages/manager/package.json @@ -1,6 +1,6 @@ { "name": "@splunk/manager", - "version": "0.0.1", + "version": "1.0.0-beta.18", "license": "UNLICENSED", "scripts": { "build": "NODE_ENV=production webpack --bail --config demo/webpack.standalone.config.js", @@ -14,7 +14,7 @@ "start:app": "webpack --watch --config demo/webpack.splunkapp.config.js", "start:demo": "webpack-dev-server --config demo/webpack.standalone.config.js --port ${DEMO_PORT-8080} --host 0.0.0.0", "stylelint": "stylelint \"src/**/*.{js,jsx}\" --config stylelint.config.js", - "test": "jest", + "test": "DEBUG_PRINT_LIMIT=1000000 jest", "test:ci": "JEST_JUNIT_OUTPUT_DIR=./test-reports JEST_JUNIT_OUTPUT_NAME=unit-results.xml JEST_JUNIT_CLASSNAME=unit yarn run test --ci --reporters=default jest-junit --coverage --coverageDirectory=coverage_report/coverage_maps_unit --coverageReporters=cobertura", "test:watch": "jest --watch" }, @@ -26,7 +26,9 @@ "css-loader": "^6.7.1", "react-router-dom": "6", "scriptjs": "^2.5.9", - "style-loader": "^3.3.1" + "style-loader": "^3.3.1", + "history": "5.3.0", + "qs": "6.11.2" }, "devDependencies": { "@babel/core": "^7.2.0", @@ -35,6 +37,11 @@ "@splunk/splunk-utils": "^2.3.4", "@splunk/stylelint-config": "^4.0.0", "@splunk/webpack-configs": "^5.0.0", + 
"@testing-library/react": "12.1.2", + "@testing-library/dom": "9.3.1", + "@testing-library/jest-dom": "5.16.5", + "@jest/globals": "^29.6.1", + "jest-environment-jsdom": "^29.6.1", "babel-eslint": "^10.1.0", "babel-loader": "^8.0.4", "chai": "^3.5.0", diff --git a/frontend/packages/manager/src/Manager.jsx b/frontend/packages/manager/src/Manager.jsx index 1e0f518..c8b2fe9 100644 --- a/frontend/packages/manager/src/Manager.jsx +++ b/frontend/packages/manager/src/Manager.jsx @@ -1,12 +1,9 @@ -import React, {useCallback, useContext, useState} from 'react'; -import { Link, Route, Routes, Switch } from 'react-router-dom'; +import React from 'react'; import ErrorsModal from "./components/ErrorsModal"; import Menu from "./components/menu_header/Menu"; import Header from "./components/menu_header/Header"; import TabPanels from "./components/menu_header/TabPanels"; -import MenuHeaderContxt from './store/menu-header-contxt'; - import { ButtonsContextProvider } from "./store/buttons-contx"; import { ErrorsModalContextProvider } from "./store/errors-modal-contxt"; @@ -18,7 +15,7 @@ import { GroupContextProvider } from "./store/group-contxt"; import { FontStyles } from "./styles/FontsStyles"; function Uncontrolled() { - const MenuCtx = useContext(MenuHeaderContxt); + return ( @@ -33,7 +30,6 @@ function Uncontrolled() { - diff --git a/frontend/packages/manager/src/components/ButtonsModal.jsx b/frontend/packages/manager/src/components/ButtonsModal.jsx deleted file mode 100644 index 575712a..0000000 --- a/frontend/packages/manager/src/components/ButtonsModal.jsx +++ /dev/null @@ -1,34 +0,0 @@ -import React, {useState, useRef, useContext} from 'react'; -import Button from '@splunk/react-ui/Button'; -import ControlGroup from '@splunk/react-ui/ControlGroup'; -import Modal from '@splunk/react-ui/Modal'; -import P from '@splunk/react-ui/Paragraph'; -import Select from '@splunk/react-ui/Select'; -import Multiselect from '@splunk/react-ui/Multiselect'; -import Text from '@splunk/react-ui/Text'; -import ButtonsContext from "../store/buttons-contx"; - -function ButtonsModal(props) { - const BtnCtx = useContext(ButtonsContext); - - const handleRequestClose = () => { - BtnCtx.setButtonsOpen(false); - }; - - return ( -
- ); -} - -export default ButtonsModal; diff --git a/frontend/packages/manager/src/components/DeleteModal.jsx b/frontend/packages/manager/src/components/DeleteModal.jsx index d45a03f..10eaf4d 100644 --- a/frontend/packages/manager/src/components/DeleteModal.jsx +++ b/frontend/packages/manager/src/components/DeleteModal.jsx @@ -1,8 +1,8 @@ -import React, { useCallback, useState, useRef, useContext } from 'react'; +import React, { useCallback, useState, useContext } from 'react'; import Button from '@splunk/react-ui/Button'; import Modal from '@splunk/react-ui/Modal'; -import axios from "axios"; import P from '@splunk/react-ui/Paragraph'; +import Message from "@splunk/react-ui/Message"; import ButtonsContext from "../store/buttons-contx"; function DeleteModal(props) { @@ -26,10 +26,14 @@ function DeleteModal(props) {

Are you sure you want to delete {props.deleteName} ?

+ {("customWarning" in props && props["customWarning"] != null) ? + (<Message type="warning"> + {props["customWarning"]} + </Message>) : null}
-