diff --git a/panther_analysis_tool/helpers.py b/panther_analysis_tool/helpers.py
deleted file mode 100644
index d25fc6f7..00000000
--- a/panther_analysis_tool/helpers.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Panther is a scalable, powerful, cloud-native SIEM written in Golang/React.
-# Copyright (C) 2020 Panther Labs Inc
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-"""Utility functions provided to policies during execution."""
-from typing import Any, Dict
-
-
-class BadLookup(Exception):
-    """Error returned when a resource lookup fails."""
-
-
-class PantherBadInput(Exception):
-    """Error returned when a Panther helper function is provided bad input."""
-
-
-def get_s3_arn_by_name(_: str) -> str:
-    """This function is used to construct an s3 bucket ARN from its name."""
-    return 'arn:aws:s3:::name'
-
-
-def s3_lookup_by_name(name: str) -> Dict[str, Any]:
-    """This function is used to get an S3 bucket resource from just its name."""
-    return resource_lookup(get_s3_arn_by_name(name))
-
-
-def dynamo_lookup(_: str) -> Dict[str, Any]:
-    """Make a dynamodb GetItem API call."""
-    return {}
-
-
-def resource_lookup(resource_id: str) -> Dict[str, Any]:
-    """This function is used to get a resource from the resources-api based on its resourceID."""
-    # Validate input so we can provide meaningful error messages to users
-    if resource_id == '':
-        raise PantherBadInput('resourceId cannot be blank')
-
-    # Get the item from dynamo
-    response = dynamo_lookup(resource_id)
-
-    # Check if dynamo failed
-    status_code = response['ResponseMetadata']['HTTPStatusCode']
-    if status_code != 200:
-        raise BadLookup('dynamodb - ' + str(status_code) + ' HTTPStatusCode')
-
-    # Check if the item was found
-    if 'Item' not in response:
-        raise BadLookup(resource_id + ' not found')
-
-    # Return just the attributes of the item
-    return response['Item']['attributes']
diff --git a/panther_analysis_tool/main.py b/panther_analysis_tool/main.py
index 3ef3fbb2..29988197 100644
--- a/panther_analysis_tool/main.py
+++ b/panther_analysis_tool/main.py
@@ -32,8 +32,6 @@
 
 import boto3
 
-from . import helpers
-
 
 class TestCase():
 
@@ -51,32 +49,76 @@ def get(self, arg: str, default: Any = None) -> Any:
         return self._data.get(arg, default)
 
 
-SPEC_SCHEMA = Schema(
+TYPE_SCHEMA = Schema({
+    'AnalysisType': Or("policy", "rule", "global"),
+},
+                     ignore_extra_keys=True)
+
+GLOBAL_SCHEMA = Schema(
+    {
+        'AnalysisType': Or("global"),
+        'Filename': str,
+        Optional('Description'): str,
+        Optional('Tags'): [str],
+    },
+    ignore_extra_keys=False)
+
+POLICY_SCHEMA = Schema(
     {
         'AnalysisType':
-            Or("policy", "rule"),
+            Or("policy"),
         'Enabled':
             bool,
         'Filename':
             str,
-        Optional('PolicyID'):
-            str,
-        Optional('RuleID'):
+        'PolicyID':
             str,
-        Optional('ResourceTypes'): [str],
-        Optional('LogTypes'): [str],
+        'ResourceTypes': [str],
         'Severity':
             Or("Info", "Low", "Medium", "High", "Critical"),
         Optional('ActionDelaySeconds'):
             int,
-        Optional('AlertFormat'):
-            str,
         Optional('AutoRemediationID'):
             str,
         Optional('AutoRemediationParameters'):
             object,
         Optional('Description'):
             str,
+        Optional('DisplayName'):
+            str,
+        Optional('Reference'):
+            str,
+        Optional('Runbook'):
+            str,
+        Optional('Suppressions'): [str],
+        Optional('Tags'): [str],
+        Optional('Reports'): {
+            str: object
+        },
+        Optional('Tests'): [{
+            'Name': str,
+            'ResourceType': str,
+            'ExpectedResult': bool,
+            'Resource': object,
+        }],
+    },
+    ignore_extra_keys=False)
+
+RULE_SCHEMA = Schema(
+    {
+        'AnalysisType':
+            Or("rule"),
+        'Enabled':
+            bool,
+        'Filename':
+            str,
+        'RuleID':
+            str,
+        'LogTypes': [str],
+        'Severity':
+            Or("Info", "Low", "Medium", "High", "Critical"),
+        Optional('Description'):
+            str,
         Optional('DedupPeriodMinutes'):
             int,
         Optional('DisplayName'):
@@ -92,11 +134,9 @@ def get(self, arg: str, default: Any = None) -> Any:
         },
         Optional('Tests'): [{
             'Name': str,
-            Optional('LogType'): str,
-            Optional('ResourceType'): str,
+            'LogType': str,
             'ExpectedResult': bool,
-            Optional('Log'): object,
-            Optional('Resource'): object,
+            'Log': object,
         }],
     },
     ignore_extra_keys=False)
@@ -127,13 +167,6 @@ def load_module(filename: str) -> Tuple[Any, Any]:
     return module, None
 
 
-# import the panther helper stubs
-#
-# When mocking is supported, these will be mocked. For now this is just here so that the
-# policies that import from Panther will pass validation.
-sys.modules['panther'], _ = load_module(helpers.__file__)
-
-
 def load_analysis_specs(directory: str) -> Iterator[Tuple[str, str, Any]]:
     """Loads the analysis specifications from a file.
 
@@ -223,9 +256,9 @@ def upload_analysis(args: argparse.Namespace) -> Tuple[int, str]:
         json.dumps({
             'Data': base64.b64encode(zip_bytes).decode('utf-8'),
             # The UserID is required by Panther for this API call, but we have no way of
-            # acquiring it and it isn't used for anything. This is a random, valid UUID so
-            # that the input can be validated by the API.
-            'UserID': 'c273fd96-88d0-41c4-a74e-941e17832915',
+            # acquiring it and it isn't used for anything. This is a valid UUID used by the
+            # Panther deployment tool to indicate this action was performed automatically.
+            'UserID': '00000000-0000-4000-8000-000000000000',
         }),
     }
 
@@ -239,6 +272,9 @@ def upload_analysis(args: argparse.Namespace) -> Tuple[int, str]:
     response_payload = json.loads(response_str)
 
     if response_payload['statusCode'] != 200:
+        logging.warning(
+            'Failed to upload to Panther\n\tstatus code: %s\n\terror message: %s',
+            response_payload['statusCode'], response_payload['body'])
         return 1, ''
 
     body = json.loads(response_payload['body'])
@@ -258,46 +294,35 @@ def test_analysis(args: argparse.Namespace) -> Tuple[int, list]:
     Returns:
         A tuple of the return code, and a list of tuples containing invalid specs and their error.
     """
-    invalid_specs = []
     failed_tests: DefaultDict[str, list] = defaultdict(list)
     tests: List[str] = []
     logging.info('Testing analysis packs in %s\n', args.path)
 
-    # First import the globals file
+    # First classify each file
     specs = list(load_analysis_specs(args.path))
-    for analysis_spec_filename, dir_name, analysis_spec in specs:
-        if (analysis_spec.get('PolicyID') or
-                analysis_spec['RuleID']) != 'aws_globals':
-            continue
+    global_analysis, analysis, invalid_specs = classify_analysis(specs)
+
+    # First import the globals
+    for analysis_spec_filename, dir_name, analysis_spec in global_analysis:
         module, load_err = load_module(
             os.path.join(dir_name, analysis_spec['Filename']))
         # If the module could not be loaded, continue to the next
        if load_err:
            invalid_specs.append((analysis_spec_filename, load_err))
            break
-        sys.modules['aws_globals'] = module
+        sys.modules['panther'] = module
 
     # Next import each policy or rule and run its tests
-    for analysis_spec_filename, dir_name, analysis_spec in specs:
+    for analysis_spec_filename, dir_name, analysis_spec in analysis:
         analysis_id = analysis_spec.get('PolicyID') or analysis_spec['RuleID']
-        if analysis_id == 'aws_globals':
-            continue
-
-        try:
-            SPEC_SCHEMA.validate(analysis_spec)
-        except (SchemaError, SchemaMissingKeyError, SchemaForbiddenKeyError,
-                SchemaUnexpectedTypeError) as err:
-            invalid_specs.append((analysis_spec_filename, err))
-            continue
-
         print(analysis_id)
 
-        # Check if the PolicyID has already been loaded
+        # Check if the AnalysisID has already been loaded
         if analysis_id in tests:
-            print('\t[ERROR] Conflicting PolicyID\n')
+            print('\t[ERROR] Conflicting AnalysisID\n')
             invalid_specs.append(
                 (analysis_spec_filename,
-                 'Conflicting PolicyID: {}'.format(analysis_id)))
+                 'Conflicting AnalysisID: {}'.format(analysis_id)))
             continue
 
         module, load_err = load_module(
@@ -309,10 +334,9 @@ def test_analysis(args: argparse.Namespace) -> Tuple[int, list]:
         tests.append(analysis_id)
 
         if analysis_spec['AnalysisType'] == 'policy':
-            run_func = module.policy
+            failed_tests = run_tests(analysis_spec, module.policy, failed_tests)
         elif analysis_spec['AnalysisType'] == 'rule':
-            run_func = module.rule
-        failed_tests = run_tests(analysis_spec, run_func, failed_tests)
+            failed_tests = run_tests(analysis_spec, module.rule, failed_tests)
         print('')
 
     for analysis_id in failed_tests:
@@ -325,6 +349,37 @@ def test_analysis(args: argparse.Namespace) -> Tuple[int, list]:
     return int(bool(failed_tests or invalid_specs)), invalid_specs
 
 
+def classify_analysis(
+        specs: List[Tuple[str, str, Any]]
+) -> Tuple[List[Any], List[Any], List[Any]]:
+    # First determine the type of each file
+    global_analysis = []
+    analysis = []
+    invalid_specs = []
+
+    for analysis_spec_filename, dir_name, analysis_spec in specs:
+        try:
+            TYPE_SCHEMA.validate(analysis_spec)
+            if analysis_spec['AnalysisType'] == 'policy':
+                POLICY_SCHEMA.validate(analysis_spec)
+                analysis.append(
+                    (analysis_spec_filename, dir_name, analysis_spec))
+            if analysis_spec['AnalysisType'] == 'rule':
+                RULE_SCHEMA.validate(analysis_spec)
+                analysis.append(
+                    (analysis_spec_filename, dir_name, analysis_spec))
+            if analysis_spec['AnalysisType'] == 'global':
+                GLOBAL_SCHEMA.validate(analysis_spec)
+                global_analysis.append(
+                    (analysis_spec_filename, dir_name, analysis_spec))
+        except (SchemaError, SchemaMissingKeyError, SchemaForbiddenKeyError,
+                SchemaUnexpectedTypeError) as err:
+            invalid_specs.append((analysis_spec_filename, err))
+            continue
+
+    return (global_analysis, analysis, invalid_specs)
+
+
 def run_tests(analysis: Dict[str, Any], run_func: Callable[[TestCase], bool],
               failed_tests: DefaultDict[str, list]) -> DefaultDict[str, list]:
diff --git a/requirements.txt b/requirements.txt
index aca6da93..f1275a39 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 # functional dependencies
 PyYAML==5.3.1
-schema==0.7.1
-boto3==1.12.26
+schema==0.7.2
+boto3==1.12.43
 # ci dependencies
 bandit==1.6.2
 mypy==0.770
@@ -20,7 +20,8 @@ awscli==1.16.286
 backcall==0.1.0
 beautifulsoup4==4.8.1
 binaryornot==0.4.4
-botocore==1.15.26
+bleach==3.1.3
+botocore==1.15.43
 bs4==0.0.1
 certifi==2019.9.11
 cfn-lint==0.26.0
@@ -56,6 +57,7 @@ jmespath==0.9.4
 jsonpatch==1.24
 jsonpointer==2.0
 jsonschema==3.1.1
+keyring==21.2.0
 lazy-object-proxy==1.4.1
 MarkupSafe==1.1.1
 mccabe==0.6.1
@@ -64,22 +66,26 @@ mypy-extensions==0.4.3
 nose==1.3.7
 nose2==0.9.1
 numpy==1.17.4
+-e git+git@github.com:panther-labs/panther_analysis_tool.git@d5989a49109868811c3f15b84af21e8464efdeba#egg=panther_analysis_tool
 parso==0.5.1
 pbr==5.4.1
 pexpect==4.7.0
 pickleshare==0.7.5
 pipenv==2018.11.26
+pkginfo==1.5.0.1
 policyuniverse==1.3.2.1
 poyo==0.5.0
 prompt-toolkit==2.0.10
 ptyprocess==0.6.0
 pyasn1==0.4.8
-Pygments==2.4.2
+Pygments==2.6.1
 pyrsistent==0.15.4
 python-dateutil==2.8.0
 pytz==2019.3
+readme-renderer==25.0
 regex==2019.11.1
 requests==2.22.0
+requests-toolbelt==0.9.1
 rsa==3.4.2
 s3transfer==0.3.2
 serverlessrepo==0.1.9
@@ -90,7 +96,9 @@ soupsieve==1.9.5
 statistics==1.0.3.5
 stevedore==1.30.1
 suricata-update==1.1.0
+tqdm==4.43.0
 traitlets==4.3.3
+twine==3.1.1
 typed-ast==1.4.0
 typing-extensions==3.7.4
 tzlocal==2.0.0
@@ -98,6 +106,7 @@ urllib3==1.25.6
 virtualenv==16.7.7
 virtualenv-clone==0.5.3
 wcwidth==0.1.7
+webencodings==0.5.1
 websocket-client==0.56.0
 Werkzeug==0.16.0
 whichcraft==0.6.1
diff --git a/setup.py b/setup.py
index 8f538246..689d003a 100644
--- a/setup.py
+++ b/setup.py
@@ -2,14 +2,14 @@
 setup(
     name='panther_analysis_tool',
     packages=['panther_analysis_tool'],
-    version='0.1.10',
+    version='0.2.0',
     license='apache-2.0',
     description=
     'Panther command line interface for writing, testing, and packaging policies/rules.',
     author='Panther Labs Inc',
     author_email='pypi@runpanther.io',
     url='https://github.com/panther-labs/panther_analysis_tool',
-    download_url = 'https://github.com/panther-labs/panther_analysis_tool/archive/v0.1.10.tar.gz',
+    download_url = 'https://github.com/panther-labs/panther_analysis_tool/archive/v0.3.0.tar.gz',
     keywords=['Security', 'CLI'],
     scripts=['bin/panther_analysis_tool'],
     install_requires=[
@@ -18,7 +18,7 @@
         'boto3',
     ],
     classifiers=[
-        'Development Status :: 2 - Pre-Alpha',
+        'Development Status :: 4 - Beta',
         'Intended Audience :: Developers',
         'Topic :: Security',
         'License :: OSI Approved :: Apache Software License',
diff --git a/tests/fixtures/valid_policies/helpers.py b/tests/fixtures/valid_policies/helpers.py
new file mode 100644
index 00000000..4dd116fb
--- /dev/null
+++ b/tests/fixtures/valid_policies/helpers.py
@@ -0,0 +1,2 @@
+def test_helper():
+    return True
diff --git a/tests/fixtures/valid_policies/helpers.yml b/tests/fixtures/valid_policies/helpers.yml
new file mode 100644
index 00000000..7d8bfc1f
--- /dev/null
+++ b/tests/fixtures/valid_policies/helpers.yml
@@ -0,0 +1,6 @@
+AnalysisType: global
+Filename: helpers.py
+Tags:
+  - AWS
+Description: >
+  Used to define global helpers and variables.