Added support for globals (#19)
* added support for globals

* fix requirements

* updated pip packaging
nhakmiller authored Apr 22, 2020
1 parent 68558c1 commit 5b99d96
Showing 6 changed files with 128 additions and 118 deletions.
62 changes: 0 additions & 62 deletions panther_analysis_tool/helpers.py

This file was deleted.

153 changes: 104 additions & 49 deletions panther_analysis_tool/main.py
@@ -32,8 +32,6 @@

import boto3

from . import helpers


class TestCase():

@@ -51,32 +49,76 @@ def get(self, arg: str, default: Any = None) -> Any:
return self._data.get(arg, default)


SPEC_SCHEMA = Schema(
TYPE_SCHEMA = Schema({
'AnalysisType': Or("policy", "rule", "global"),
},
ignore_extra_keys=True)

GLOBAL_SCHEMA = Schema(
{
'AnalysisType': Or("global"),
'Filename': str,
Optional('Description'): str,
Optional('Tags'): [str],
},
ignore_extra_keys=False)

POLICY_SCHEMA = Schema(
{
'AnalysisType':
Or("policy", "rule"),
Or("policy"),
'Enabled':
bool,
'Filename':
str,
Optional('PolicyID'):
str,
Optional('RuleID'):
'PolicyID':
str,
Optional('ResourceTypes'): [str],
Optional('LogTypes'): [str],
'ResourceTypes': [str],
'Severity':
Or("Info", "Low", "Medium", "High", "Critical"),
Optional('ActionDelaySeconds'):
int,
Optional('AlertFormat'):
str,
Optional('AutoRemediationID'):
str,
Optional('AutoRemediationParameters'):
object,
Optional('Description'):
str,
Optional('DisplayName'):
str,
Optional('Reference'):
str,
Optional('Runbook'):
str,
Optional('Suppressions'): [str],
Optional('Tags'): [str],
Optional('Reports'): {
str: object
},
Optional('Tests'): [{
'Name': str,
'ResourceType': str,
'ExpectedResult': bool,
'Resource': object,
}],
},
ignore_extra_keys=False)

RULE_SCHEMA = Schema(
{
'AnalysisType':
Or("rule"),
'Enabled':
bool,
'Filename':
str,
'RuleID':
str,
'LogTypes': [str],
'Severity':
Or("Info", "Low", "Medium", "High", "Critical"),
Optional('Description'):
str,
Optional('DedupPeriodMinutes'):
int,
Optional('DisplayName'):
@@ -92,11 +134,9 @@ def get(self, arg: str, default: Any = None) -> Any:
},
Optional('Tests'): [{
'Name': str,
Optional('LogType'): str,
Optional('ResourceType'): str,
'LogType': str,
'ExpectedResult': bool,
Optional('Log'): object,
Optional('Resource'): object,
'Log': object,
}],
},
ignore_extra_keys=False)
@@ -127,13 +167,6 @@ def load_module(filename: str) -> Tuple[Any, Any]:
return module, None


# import the panther helper stubs
#
# When mocking is supported, these will be mocked. For now this is just here so that the
# policies that import from Panther will pass validation.
sys.modules['panther'], _ = load_module(helpers.__file__)


def load_analysis_specs(directory: str) -> Iterator[Tuple[str, str, Any]]:
"""Loads the analysis specifications from a file.
@@ -223,9 +256,9 @@ def upload_analysis(args: argparse.Namespace) -> Tuple[int, str]:
json.dumps({
'Data': base64.b64encode(zip_bytes).decode('utf-8'),
# The UserID is required by Panther for this API call, but we have no way of
# acquiring it and it isn't used for anything. This is a random, valid UUID so
# that the input can be validated by the API.
'UserID': 'c273fd96-88d0-41c4-a74e-941e17832915',
# acquiring it and it isn't used for anything. This is a valid UUID used by the
# Panther deployment tool to indicate this action was performed automatically.
'UserID': '00000000-0000-4000-8000-000000000000',
}),
}

@@ -239,6 +272,9 @@ def upload_analysis(args: argparse.Namespace) -> Tuple[int, str]:
response_payload = json.loads(response_str)

if response_payload['statusCode'] != 200:
logging.warning(
'Failed to upload to Panther\n\tstatus code: %s\n\terror message: %s',
response_payload['statusCode'], response_payload['body'])
return 1, ''

body = json.loads(response_payload['body'])
@@ -258,46 +294,35 @@ def test_analysis(args: argparse.Namespace) -> Tuple[int, list]:
Returns:
A tuple of the return code, and a list of tuples containing invalid specs and their error.
"""
invalid_specs = []
failed_tests: DefaultDict[str, list] = defaultdict(list)
tests: List[str] = []
logging.info('Testing analysis packs in %s\n', args.path)

# First import the globals file
# First classify each file
specs = list(load_analysis_specs(args.path))
for analysis_spec_filename, dir_name, analysis_spec in specs:
if (analysis_spec.get('PolicyID') or
analysis_spec['RuleID']) != 'aws_globals':
continue
global_analysis, analysis, invalid_specs = classify_analysis(specs)

# First import the globals
for analysis_spec_filename, dir_name, analysis_spec in global_analysis:
module, load_err = load_module(
os.path.join(dir_name, analysis_spec['Filename']))
# If the module could not be loaded, continue to the next
if load_err:
invalid_specs.append((analysis_spec_filename, load_err))
break
sys.modules['aws_globals'] = module
sys.modules['panther'] = module

# Next import each policy or rule and run its tests
for analysis_spec_filename, dir_name, analysis_spec in specs:
for analysis_spec_filename, dir_name, analysis_spec in analysis:
analysis_id = analysis_spec.get('PolicyID') or analysis_spec['RuleID']
if analysis_id == 'aws_globals':
continue

try:
SPEC_SCHEMA.validate(analysis_spec)
except (SchemaError, SchemaMissingKeyError, SchemaForbiddenKeyError,
SchemaUnexpectedTypeError) as err:
invalid_specs.append((analysis_spec_filename, err))
continue

print(analysis_id)

# Check if the PolicyID has already been loaded
# Check if the AnalysisID has already been loaded
if analysis_id in tests:
print('\t[ERROR] Conflicting PolicyID\n')
print('\t[ERROR] Conflicting AnalysisID\n')
invalid_specs.append(
(analysis_spec_filename,
'Conflicting PolicyID: {}'.format(analysis_id)))
'Conflicting AnalysisID: {}'.format(analysis_id)))
continue

module, load_err = load_module(
@@ -309,10 +334,9 @@ def test_analysis(args: argparse.Namespace) -> Tuple[int, list]:

tests.append(analysis_id)
if analysis_spec['AnalysisType'] == 'policy':
run_func = module.policy
failed_tests = run_tests(analysis_spec, module.policy, failed_tests)
elif analysis_spec['AnalysisType'] == 'rule':
run_func = module.rule
failed_tests = run_tests(analysis_spec, run_func, failed_tests)
failed_tests = run_tests(analysis_spec, module.rule, failed_tests)
print('')

for analysis_id in failed_tests:
@@ -325,6 +349,37 @@ def test_analysis(args: argparse.Namespace) -> Tuple[int, list]:
return int(bool(failed_tests or invalid_specs)), invalid_specs


def classify_analysis(
specs: List[Tuple[str, str, Any]]
) -> Tuple[List[Any], List[Any], List[Any]]:
# First determine the type of each file
global_analysis = []
analysis = []
invalid_specs = []

for analysis_spec_filename, dir_name, analysis_spec in specs:
try:
TYPE_SCHEMA.validate(analysis_spec)
if analysis_spec['AnalysisType'] == 'policy':
POLICY_SCHEMA.validate(analysis_spec)
analysis.append(
(analysis_spec_filename, dir_name, analysis_spec))
if analysis_spec['AnalysisType'] == 'rule':
RULE_SCHEMA.validate(analysis_spec)
analysis.append(
(analysis_spec_filename, dir_name, analysis_spec))
if analysis_spec['AnalysisType'] == 'global':
GLOBAL_SCHEMA.validate(analysis_spec)
global_analysis.append(
(analysis_spec_filename, dir_name, analysis_spec))
except (SchemaError, SchemaMissingKeyError, SchemaForbiddenKeyError,
SchemaUnexpectedTypeError) as err:
invalid_specs.append((analysis_spec_filename, err))
continue

return (global_analysis, analysis, invalid_specs)


def run_tests(analysis: Dict[str, Any], run_func: Callable[[TestCase], bool],
failed_tests: DefaultDict[str, list]) -> DefaultDict[str, list]:

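For context on the flow this diff introduces, here is a minimal sketch (not part of the commit) of how the new classify_analysis routes a "global" spec and how test_analysis then exposes it to rules and policies. The file name, directory, and spec contents below are hypothetical examples, not values from the repository.

from panther_analysis_tool.main import classify_analysis

# One parsed spec tuple, shaped like the output of load_analysis_specs.
example_specs = [
    (
        'aws_globals.yml',           # analysis_spec_filename (hypothetical)
        'analysis/global_helpers',   # dir_name (hypothetical)
        {
            'AnalysisType': 'global',
            'Filename': 'aws_globals.py',
            'Description': 'Shared helpers used by rules and policies',
            'Tags': ['AWS'],
        },
    ),
]

global_analysis, analysis, invalid_specs = classify_analysis(example_specs)
# The spec above satisfies GLOBAL_SCHEMA, so it lands in global_analysis.
assert len(global_analysis) == 1 and not analysis and not invalid_specs

# test_analysis then loads each global with load_module and registers it in
# sys.modules under both 'aws_globals' and 'panther', so a rule or policy that
# does `import panther` resolves to the uploaded global module instead of the
# deleted helpers.py stub.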
17 changes: 13 additions & 4 deletions requirements.txt
@@ -1,7 +1,7 @@
# functional dependencies
PyYAML==5.3.1
schema==0.7.1
boto3==1.12.26
schema==0.7.2
boto3==1.12.43
# ci dependencies
bandit==1.6.2
mypy==0.770
@@ -20,7 +20,8 @@ awscli==1.16.286
backcall==0.1.0
beautifulsoup4==4.8.1
binaryornot==0.4.4
botocore==1.15.26
bleach==3.1.3
botocore==1.15.43
bs4==0.0.1
certifi==2019.9.11
cfn-lint==0.26.0
@@ -56,6 +57,7 @@ jmespath==0.9.4
jsonpatch==1.24
jsonpointer==2.0
jsonschema==3.1.1
keyring==21.2.0
lazy-object-proxy==1.4.1
MarkupSafe==1.1.1
mccabe==0.6.1
@@ -64,22 +66,26 @@ mypy-extensions==0.4.3
nose==1.3.7
nose2==0.9.1
numpy==1.17.4
-e git+git@github.com:panther-labs/panther_analysis_tool.git@d5989a49109868811c3f15b84af21e8464efdeba#egg=panther_analysis_tool
parso==0.5.1
pbr==5.4.1
pexpect==4.7.0
pickleshare==0.7.5
pipenv==2018.11.26
pkginfo==1.5.0.1
policyuniverse==1.3.2.1
poyo==0.5.0
prompt-toolkit==2.0.10
ptyprocess==0.6.0
pyasn1==0.4.8
Pygments==2.4.2
Pygments==2.6.1
pyrsistent==0.15.4
python-dateutil==2.8.0
pytz==2019.3
readme-renderer==25.0
regex==2019.11.1
requests==2.22.0
requests-toolbelt==0.9.1
rsa==3.4.2
s3transfer==0.3.2
serverlessrepo==0.1.9
@@ -90,14 +96,17 @@ soupsieve==1.9.5
statistics==1.0.3.5
stevedore==1.30.1
suricata-update==1.1.0
tqdm==4.43.0
traitlets==4.3.3
twine==3.1.1
typed-ast==1.4.0
typing-extensions==3.7.4
tzlocal==2.0.0
urllib3==1.25.6
virtualenv==16.7.7
virtualenv-clone==0.5.3
wcwidth==0.1.7
webencodings==0.5.1
websocket-client==0.56.0
Werkzeug==0.16.0
whichcraft==0.6.1