"""Parse gzipped CloudTrail logs from S3 and fan out security alerts.

Triggered by an EventBridge rule on S3 PutObject events for the CloudTrail
log bucket.  Each matching record is posted to Slack and indexed into
OpenSearch.
"""
import gzip
import json
import os
import urllib.error
import urllib.request


def send_to_opensearch(record: dict):
    """Index one CloudTrail record into OpenSearch.

    Raises urllib.error.HTTPError / URLError on failure so the caller can
    log the problem and keep processing.
    """
    endpoint = os.environ['OPENSEARCH_URL']
    # One index per event name, e.g. "security-alerts-deleteuser".
    index = "security-alerts-" + record.get("eventName", "unknown").lower()
    # NOTE(review): the OpenSearch domain access policy elsewhere in this
    # repo grants "security-events-*" while this writes "security-alerts-*"
    # indices -- confirm the prefixes agree or indexing will be denied.
    url = f"{endpoint}/{index}/_doc"
    # Flatten the fields we query on; keep the full record under "raw".
    doc = {
        "@timestamp": record.get("eventTime"),
        "eventName": record.get("eventName"),
        "user": record.get("userIdentity", {}).get("arn"),
        "sourceIP": record.get("sourceIPAddress"),
        "awsRegion": record.get("awsRegion"),
        "accountId": record.get("recipientAccountId"),
        "raw": record
    }
    # Use the stdlib instead of the third-party "requests" package so the
    # deployment zip needs no bundled dependencies (the Lambda runtime does
    # not ship "requests").  urlopen() raises HTTPError for non-2xx
    # responses, which replaces requests' raise_for_status().
    req = urllib.request.Request(
        url,
        data=json.dumps(doc).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req, timeout=5):
        pass


def send_slack_alert(record: dict):
    """Post a Block Kit formatted alert for one CloudTrail record to Slack.

    Failures are logged and swallowed on purpose: a Slack outage must not
    abort log processing.
    """
    webhook_url = os.environ['SLACK_WEBHOOK_URL']
    user = record.get("userIdentity", {}).get("arn", "Unknown user")
    event_name = record.get("eventName", "Unknown event")
    source_ip = record.get("sourceIPAddress", "Unknown IP")
    time = record.get("eventTime", "Unknown time")
    region = record.get("awsRegion", "Unknown region")
    account = record.get("recipientAccountId", "Unknown account")

    slack_payload = {
        "blocks": [
            {
                "type": "header",
                "text": {
                    "type": "plain_text",
                    "text": ":rotating_light: AWS Security Alert",
                    "emoji": True
                }
            },
            {
                "type": "section",
                "fields": [
                    {"type": "mrkdwn", "text": f"*Event:*\n`{event_name}`"},
                    {"type": "mrkdwn", "text": f"*User:*\n`{user}`"},
                    {"type": "mrkdwn", "text": f"*Source IP:*\n`{source_ip}`"},
                    {"type": "mrkdwn", "text": f"*Region:*\n`{region}`"},
                    {"type": "mrkdwn", "text": f"*Account:*\n`{account}`"},
                    {"type": "mrkdwn", "text": f"*Time:*\n`{time}`"}
                ]
            },
            {
                "type": "divider"
            }
        ]
    }

    data = json.dumps(slack_payload).encode('utf-8')
    req = urllib.request.Request(webhook_url, data=data, headers={'Content-Type': 'application/json'})

    try:
        with urllib.request.urlopen(req, timeout=5) as response:
            print("Slack message sent:", response.status)
    except Exception as e:
        print("Error sending Slack message:", str(e))


# CloudTrail event names that warrant an alert: IAM tampering, trail /
# MFA changes, security-group changes, and instance launches.  Module-level
# so the set is built once per container, not per invocation.
ALERT_EVENTS = frozenset({
    "DeleteUser", "DeleteRole", "DeleteLoginProfile",
    "StopLogging", "DeleteTrail",
    "DeactivateMFADevice", "DeleteVirtualMFADevice",
    "AuthorizeSecurityGroupIngress", "RevokeSecurityGroupIngress",
    "AuthorizeSecurityGroupEgress", "RevokeSecurityGroupEgress",
    "AttachUserPolicy", "DetachUserPolicy",
    "PutUserPolicy", "DeleteUserPolicy",
    "CreatePolicy", "DeletePolicy",
    "RunInstances"
})


def lambda_handler(event, context):
    """Entry point: fetch the gzipped CloudTrail object named in *event*,
    filter its records, and send Slack / OpenSearch alerts.

    Returns an API-Gateway-style dict: statusCode 400 for malformed events,
    500 for S3/parse failures, 200 on success.
    """
    print("Received S3 PutObject event:", json.dumps(event, indent=2))

    # 1) Extract bucket and key from the EventBridge detail.
    params = event.get("detail", {}).get("requestParameters", {})
    bucket = params.get("bucketName")
    key = params.get("key")

    if not bucket or not key:
        print("Bucket or key missing in event detail")
        return {"statusCode": 400, "body": json.dumps({"error": "Invalid event"})}

    # 2) Download and parse the gzipped CloudTrail log file from S3.
    # boto3 is imported lazily so the module can be imported (e.g. by unit
    # tests) in environments without the AWS SDK installed; the SDK is always
    # present in the real Lambda runtime.
    import boto3
    s3 = boto3.client("s3")
    try:
        obj = s3.get_object(Bucket=bucket, Key=key)
    except Exception as e:
        print(f"Error fetching object {bucket}/{key}: {e}")
        return {"statusCode": 500, "body": json.dumps({"error": str(e)})}

    try:
        with gzip.GzipFile(fileobj=obj["Body"]) as gz:
            log_data = json.load(gz)
    except Exception as e:
        print(f"Error decompressing/parsing CloudTrail log: {e}")
        return {"statusCode": 500, "body": json.dumps({"error": str(e)})}

    # 3) Filter each record and send alerts; an OpenSearch failure must not
    # stop the remaining records from being processed.
    for record in log_data.get("Records", []):
        evt = record.get("eventName")
        if evt in ALERT_EVENTS:
            send_slack_alert(record)
            try:
                send_to_opensearch(record)
            except Exception as e:
                print(f"[Warning] OpenSearch indexing failed for {evt}: {e}")

    return {
        "statusCode": 200,
        "body": json.dumps({"message": "Processed S3 log and sent alerts."})
    }
b/modules/lambda_log_processor/lambda_package.zip new file mode 100644 index 00000000..c2200904 Binary files /dev/null and b/modules/lambda_log_processor/lambda_package.zip differ diff --git a/modules/lambda_log_processor/main.tf b/modules/lambda_log_processor/main.tf new file mode 100644 index 00000000..11ef8c82 --- /dev/null +++ b/modules/lambda_log_processor/main.tf @@ -0,0 +1,85 @@ +resource "aws_iam_role" "lambda_exec" { + name = "lambda-log-processor-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { Service = "lambda.amazonaws.com" } + Action = "sts:AssumeRole" + } + ] + }) +} + +resource "aws_iam_role_policy" "lambda_policy" { + name = "lambda-log-policy" + role = aws_iam_role.lambda_exec.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "AllowCloudWatchLogs" + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "*" + }, + { + Sid = "AllowOpenSearchAccess" + Effect = "Allow" + Action = [ + "es:ESHttpPost", + "es:ESHttpPut", + "es:ESHttpGet" + ] + Resource = "${var.opensearch_domain_arn}/security-alerts-*/*" + }, + { + Sid = "AllowKMSDecrypt" + Effect = "Allow" + Action = ["kms:Decrypt"] + Resource = var.kms_key_arn + }, + { + Sid = "AllowS3Read" + Effect = "Allow" + Action = ["s3:GetObject"] + Resource = "${var.bucket_arn}/*" + } + ] + }) +} + +resource "aws_lambda_function" "log_processor" { + function_name = var.lambda_function_name + handler = "lambda_function.lambda_handler" + runtime = "python3.11" + role = aws_iam_role.lambda_exec.arn + timeout = 30 + memory_size = 256 + filename = var.lambda_zip_path + source_code_hash = filebase64sha256(var.lambda_zip_path) + + vpc_config { + subnet_ids = var.lambda_subnet_ids + security_group_ids = var.lambda_security_group_ids + } + + environment { + variables = { + SLACK_WEBHOOK_URL = var.slack_webhook_url + OPENSEARCH_URL = 
"https://${var.opensearch_endpoint}" + } + } +} + +resource "aws_iam_role_policy_attachment" "vpc_access" { + role = aws_iam_role.lambda_exec.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" +} \ No newline at end of file diff --git a/modules/lambda_log_processor/outputs.tf b/modules/lambda_log_processor/outputs.tf new file mode 100644 index 00000000..3a9ee18f --- /dev/null +++ b/modules/lambda_log_processor/outputs.tf @@ -0,0 +1,14 @@ +output "lambda_function_name" { + value = aws_lambda_function.log_processor.function_name + description = "Name of the deployed Lambda function" +} + +output "lambda_function_role_arn" { + value = aws_iam_role.lambda_exec.arn + description = "IAM Role ARN for Lambda execution" +} + +output "lambda_function_arn" { + value = aws_lambda_function.log_processor.arn + description = "ARN of the Lambda function" +} \ No newline at end of file diff --git a/modules/lambda_log_processor/variables.tf b/modules/lambda_log_processor/variables.tf new file mode 100644 index 00000000..cdb999ed --- /dev/null +++ b/modules/lambda_log_processor/variables.tf @@ -0,0 +1,45 @@ +variable "lambda_function_name" { + type = string + description = "Name of the Lambda function" +} + +variable "lambda_zip_path" { + type = string + description = "Path to the zipped Lambda package" +} + +variable "opensearch_domain_arn" { + type = string + description = "ARN of the OpenSearch domain" +} + +variable "opensearch_endpoint" { + type = string + description = "OpenSearch endpoint URL" +} + +variable "slack_webhook_url" { + type = string + description = "Slack Webhook URL" + sensitive = true +} + +variable "kms_key_arn" { + type = string + description = "KMS key for decrypting Slack secret (if encrypted)" +} + +variable "bucket_arn" { + description = "ARN of the S3 bucket for CloudTrail logs" + type = string +} + +variable "lambda_subnet_ids" { + description = "List of subnet IDs for the Lambda function to attach to the VPC" + 
type = list(string) +} + +variable "lambda_security_group_ids" { + description = "Security group IDs for the Lambda function in the VPC" + type = list(string) +} \ No newline at end of file diff --git a/modules/network_vpc/main.tf b/modules/network_vpc/main.tf new file mode 100644 index 00000000..a94fac9c --- /dev/null +++ b/modules/network_vpc/main.tf @@ -0,0 +1,73 @@ +resource "aws_vpc" "main" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_internet_gateway" "igw" { + vpc_id = aws_vpc.main.id +} + +resource "aws_subnet" "public" { + # tfsec:ignore:aws-ec2-no-public-ip-subnet + vpc_id = aws_vpc.main.id + cidr_block = "10.0.0.0/24" + map_public_ip_on_launch = true + availability_zone = "ap-northeast-2a" +} + +resource "aws_eip" "nat" { + domain = "vpc" +} + +resource "aws_nat_gateway" "nat" { + allocation_id = aws_eip.nat.id + subnet_id = aws_subnet.public.id + depends_on = [aws_internet_gateway.igw] +} + +resource "aws_subnet" "private" { + vpc_id = aws_vpc.main.id + cidr_block = "10.0.1.0/24" + availability_zone = "ap-northeast-2a" +} + +resource "aws_route_table" "private" { + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.nat.id + } +} + +resource "aws_route_table" "public" { + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.igw.id + } +} + +resource "aws_route_table_association" "public" { + subnet_id = aws_subnet.public.id + route_table_id = aws_route_table.public.id +} + +resource "aws_route_table_association" "private" { + subnet_id = aws_subnet.private.id + route_table_id = aws_route_table.private.id +} + +resource "aws_security_group" "allow_lambda" { + # tfsec:ignore:aws-ec2-no-public-egress-sgr + name = "lambda-security-group" + description = "Allow Lambda to access internet via NAT" + vpc_id = aws_vpc.main.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} diff --git 
a/modules/network_vpc/outputs.tf b/modules/network_vpc/outputs.tf new file mode 100644 index 00000000..937f5680 --- /dev/null +++ b/modules/network_vpc/outputs.tf @@ -0,0 +1,7 @@ +output "private_subnet_id" { + value = aws_subnet.private.id +} + +output "security_group_id" { + value = aws_security_group.allow_lambda.id +} \ No newline at end of file diff --git a/modules/opensearch/main.tf b/modules/opensearch/main.tf new file mode 100644 index 00000000..46347ed1 --- /dev/null +++ b/modules/opensearch/main.tf @@ -0,0 +1,43 @@ +resource "aws_opensearch_domain" "siem" { + domain_name = "siem-${var.domain_name}" + engine_version = var.engine_version + + cluster_config { + instance_type = var.cluster_instance_type + instance_count = var.cluster_instance_count + } + + ebs_options { + ebs_enabled = true + volume_size = var.ebs_volume_size + } + + encrypt_at_rest { + enabled = true + kms_key_id = var.kms_key_arn + } + + node_to_node_encryption { + enabled = true + } + + domain_endpoint_options { + enforce_https = true + tls_security_policy = "Policy-Min-TLS-1-2-2019-07" + } + + vpc_options { + subnet_ids = var.subnet_ids + security_group_ids = var.security_group_ids + } + + tags = { + Name = "siem-opensearch" + Environment = "dev" + Owner = "monitoring-team" + } +} + +output "endpoint" { + value = aws_opensearch_domain.siem.endpoint +} \ No newline at end of file diff --git a/modules/opensearch/outputs.tf b/modules/opensearch/outputs.tf new file mode 100644 index 00000000..964430be --- /dev/null +++ b/modules/opensearch/outputs.tf @@ -0,0 +1,14 @@ +output "domain_name" { + description = "Name of the OpenSearch domain" + value = aws_opensearch_domain.siem.domain_name +} + +output "domain_endpoint" { + description = "Endpoint of the OpenSearch domain" + value = aws_opensearch_domain.siem.endpoint +} + +output "domain_arn" { + description = "ARN of the OpenSearch domain" + value = aws_opensearch_domain.siem.arn +} \ No newline at end of file diff --git 
a/modules/opensearch/policy.tf b/modules/opensearch/policy.tf new file mode 100644 index 00000000..7750df64 --- /dev/null +++ b/modules/opensearch/policy.tf @@ -0,0 +1,23 @@ +resource "aws_opensearch_domain_policy" "siem_policy" { + domain_name = aws_opensearch_domain.siem.domain_name + + access_policies = jsonencode({ + Version = "2012-10-17", + Statement = [ + { + Effect = "Allow", + Principal = { + AWS = var.lambda_role_arn + }, + Action = [ + "es:ESHttpPut", + "es:ESHttpPost", + "es:ESHttpGet" + ], + Resource = [ + "${aws_opensearch_domain.siem.arn}/security-events-*/*" + ] + } + ] + }) +} \ No newline at end of file diff --git a/modules/opensearch/variables.tf b/modules/opensearch/variables.tf new file mode 100644 index 00000000..ca7929dc --- /dev/null +++ b/modules/opensearch/variables.tf @@ -0,0 +1,44 @@ +variable "domain_name" { + description = "OpenSearch domain name (without prefix)" + type = string +} + +variable "engine_version" { + description = "OpenSearch engine version" + type = string +} + +variable "cluster_instance_type" { + description = "Instance type for OpenSearch nodes" + type = string +} + +variable "cluster_instance_count" { + description = "Number of OpenSearch instances" + type = number +} + +variable "ebs_volume_size" { + description = "EBS volume size (GiB) for each OpenSearch node" + type = number +} + +variable "kms_key_arn" { + description = "KMS key ARN to encrypt OpenSearch data at rest" + type = string +} + +variable "lambda_role_arn" { + description = "IAM Role ARN that Lambda will assume for indexing into OpenSearch" + type = string +} + +variable "subnet_ids" { + description = "List of subnet IDs for the OpenSearch domain VPC configuration" + type = list(string) +} + +variable "security_group_ids" { + description = "List of security group IDs for the OpenSearch domain VPC configuration" + type = list(string) +} \ No newline at end of file diff --git a/modules/s3_cloudtrail_logs/main.tf b/modules/s3_cloudtrail_logs/main.tf new 
file mode 100644 index 00000000..996326a8 --- /dev/null +++ b/modules/s3_cloudtrail_logs/main.tf @@ -0,0 +1,163 @@ +data "aws_caller_identity" "current" {} +data "aws_caller_identity" "prod" {} + +resource "aws_kms_key" "cloudtrail" { + description = "KMS key for encrypting CloudTrail logs in S3" + deletion_window_in_days = 30 + + # 키 정책: 계정 루트 + CloudTrail 서비스 허용 + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + # 이 KMS 키를 만든 계정(root)이 모든 작업을 할 수 있도록 + { + Sid = "AllowAccountRootFullAccess" + Effect = "Allow" + Principal = { + AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root" + } + Action = "kms:*" + Resource = "*" + }, + + # CloudTrail 서비스가 이 키로 암호화 작업을 할 수 있도록 + { + Sid = "AllowCloudTrailUseOfKey" + Effect = "Allow" + Principal = { + Service = "cloudtrail.amazonaws.com" + } + Action = [ + "kms:GenerateDataKey*", + "kms:Decrypt" + ] + Resource = "*" + } + ] + }) +} + +resource "aws_kms_alias" "cloudtrail" { + name = var.kms_alias_name + target_key_id = aws_kms_key.cloudtrail.key_id +} + +resource "aws_s3_bucket" "logs" { + bucket = var.bucket_name +} + +resource "aws_s3_bucket_versioning" "logs" { + bucket = aws_s3_bucket.logs.id + + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "logs" { + bucket = aws_s3_bucket.logs.id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "aws:kms" + kms_master_key_id = aws_kms_key.cloudtrail.arn + } + } +} + +resource "aws_s3_bucket_public_access_block" "block" { + bucket = aws_s3_bucket.logs.id + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +resource "aws_s3_bucket_policy" "cloudtrail" { + bucket = aws_s3_bucket.logs.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + # HTTPS 아닌 요청 모두 거부 + { + Sid = "DenyInsecureTransport" + Effect = "Deny" + Principal = "*" + Action = "s3:*" + Resource = [ + 
aws_s3_bucket.logs.arn, + "${aws_s3_bucket.logs.arn}/*" + ] + Condition = { + Bool = { "aws:SecureTransport" = "false" } + } + }, + # ACL 확인 허용 + { + Sid = "AllowCloudTrailAclCheck" + Effect = "Allow" + Principal = { Service = "cloudtrail.amazonaws.com" } + Action = "s3:GetBucketAcl" + Resource = aws_s3_bucket.logs.arn + }, + + # 로그 쓰기 + bucket-owner-full-control ACL 조건 + { + Sid = "AllowCloudTrailWrite" + Effect = "Allow" + Principal = { Service = "cloudtrail.amazonaws.com" } + Action = "s3:PutObject" + Resource = "${aws_s3_bucket.logs.arn}/AWSLogs/*" + Condition = { + StringEquals = { + "s3:x-amz-acl" = "bucket-owner-full-control" + } + } + }, + + # prod 계정의 WAF가 로그 쓸 수 있도록 허용 + { + Sid = "AllowProdWAFWrite" + Effect = "Allow" + Principal = { + AWS = "arn:aws:iam::${var.prod_account_id}:root" + } + Action = "s3:PutObject" + Resource = "${aws_s3_bucket.logs.arn}/AWSLogs/${var.prod_account_id}/*" + Condition = { + StringEquals = { + "s3:x-amz-acl" = "bucket-owner-full-control" + } + } + }, + + # prod 계정의 WAF가 ACL 조회 가능하도록 허용 + { + Sid = "AllowProdWAFAclCheck" + Effect = "Allow" + Principal = { + AWS = "arn:aws:iam::${var.prod_account_id}:root" + } + Action = "s3:GetBucketAcl" + Resource = aws_s3_bucket.logs.arn + } + + ] + }) +} + +resource "aws_s3_bucket_lifecycle_configuration" "logs" { + bucket = aws_s3_bucket.logs.id + + rule { + id = "expire-logs-after-30-days" + status = "Enabled" + + filter { prefix = "" } + + expiration { + days = 30 + } + } +} \ No newline at end of file diff --git a/modules/s3_cloudtrail_logs/outputs.tf b/modules/s3_cloudtrail_logs/outputs.tf new file mode 100644 index 00000000..2781a629 --- /dev/null +++ b/modules/s3_cloudtrail_logs/outputs.tf @@ -0,0 +1,11 @@ +output "bucket_name" { + value = aws_s3_bucket.logs.bucket +} + +output "bucket_arn" { + value = aws_s3_bucket.logs.arn +} + +output "kms_key_arn" { + value = aws_kms_key.cloudtrail.arn +} \ No newline at end of file diff --git a/modules/s3_cloudtrail_logs/variables.tf 
b/modules/s3_cloudtrail_logs/variables.tf new file mode 100644 index 00000000..2300b642 --- /dev/null +++ b/modules/s3_cloudtrail_logs/variables.tf @@ -0,0 +1,21 @@ +variable "bucket_name" { type = string } + +variable "aws_region" { + description = "Region where the KMS key is created" + type = string +} + +variable "management_account_id" { + description = "Account ID of the management account (for S3 bucket policy)" + type = string +} + +variable "kms_alias_name" { + description = "KMS key alias for CloudTrail logs" + type = string +} + +variable "prod_account_id" { + description = "The AWS account ID for the prod account" + type = string +} \ No newline at end of file diff --git a/operation-team-account/main.tf b/operation-team-account/main.tf new file mode 100644 index 00000000..852945e4 --- /dev/null +++ b/operation-team-account/main.tf @@ -0,0 +1,103 @@ +terraform { + required_version = ">= 1.1.0" + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + backend "s3" { + bucket = "cloudfence-operation-state" + key = "monitoring/terraform.tfstate" + region = "ap-northeast-2" + encrypt = true + dynamodb_table = "tfstate-operation-lock" + profile = "whs-sso-operation" + } +} + +provider "aws" { + region = var.aws_region + profile = "whs-sso-operation" +} + +provider "aws" { + alias = "management" + region = var.aws_region + profile = "whs-sso-management" +} + +data "aws_caller_identity" "current" {} + +# 기본(default) VPC 자동 조회 +data "aws_vpc" "default" { + default = true +} + +# 해당 VPC의 모든 서브넷 ID +data "aws_subnets" "default" { + filter { + name = "vpc-id" + values = [data.aws_vpc.default.id] + } +} + +# 해당 VPC의 default 보안 그룹 +data "aws_security_group" "default" { + name = "default" + vpc_id = data.aws_vpc.default.id +} + +data "aws_caller_identity" "management" { + provider = aws.management +} + +# 2) S3 모듈: CloudTrail 로그 버킷 + KMS +module "s3" { + source = "../modules/s3_cloudtrail_logs" + bucket_name = 
var.cloudtrail_bucket_name + aws_region = var.aws_region + kms_alias_name = var.kms_alias_name + management_account_id = data.aws_caller_identity.management.account_id +} + +# 3) OpenSearch 모듈: 도메인 생성 + 접근 정책 +module "opensearch" { + source = "../modules/opensearch" + domain_name = var.opensearch_domain_name + engine_version = var.opensearch_engine_version + cluster_instance_type = var.opensearch_instance_type + cluster_instance_count = var.opensearch_instance_count + ebs_volume_size = var.opensearch_ebs_size + kms_key_arn = module.s3.kms_key_arn + lambda_role_arn = module.lambda.lambda_function_role_arn + subnet_ids = [data.aws_subnets.default.ids[0]] + security_group_ids = [data.aws_security_group.default.id] +} + +# 4) Lambda 모듈: 로그 파싱 → OpenSearch + Slack 전송 +module "lambda" { + source = "../modules/lambda_log_processor" + lambda_function_name = "cloudtrail-log-processor" + lambda_zip_path = "../modules/lambda_log_processor/lambda_package.zip" + opensearch_domain_arn = module.opensearch.domain_arn + opensearch_endpoint = module.opensearch.endpoint + slack_webhook_url = var.slack_webhook_url + kms_key_arn = module.s3.kms_key_arn + bucket_arn = module.s3.bucket_arn + lambda_subnet_ids = [module.network.private_subnet_id] + lambda_security_group_ids = [module.network.security_group_id] +} + +# 5) EventBridge 모듈: S3 PutObject → Lambda 트리거 +module "eventbridge" { + source = "../modules/eventbridge_triggers" + bucket_name = module.s3.bucket_name + lambda_function_name = module.lambda.lambda_function_name + lambda_function_arn = module.lambda.lambda_function_arn +} + +# 6) network 모듈 호출 +module "network" { + source = "../modules/network_vpc" +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/eventbridge/backend.tf b/operation-team-account/runtime-verification/eventbridge/backend.tf new file mode 100644 index 00000000..596aa632 --- /dev/null +++ b/operation-team-account/runtime-verification/eventbridge/backend.tf @@ -0,0 +1,9 @@ 
+terraform { + backend "s3" { + bucket = "cloudfence-operation-state" + key = "runtime-verification/eventbridge.tfstate" + region = "ap-northeast-2" + dynamodb_table = "s3-operation-lock" + encrypt = true + } +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/eventbridge/main.tf b/operation-team-account/runtime-verification/eventbridge/main.tf new file mode 100644 index 00000000..3589cbf6 --- /dev/null +++ b/operation-team-account/runtime-verification/eventbridge/main.tf @@ -0,0 +1,51 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + +} + +provider "aws" { + region = "ap-northeast-2" +} + +data "terraform_remote_state" "lambda" { + backend = "s3" + config = { + bucket = "cloudfence-operation-state" + key = "runtime-verification/lambda.tfstate" + region = "ap-northeast-2" + dynamodb_table = "s3-operation-lock" + } +} + +resource "aws_cloudwatch_event_rule" "inspector_event_rule" { + name = "inspector-event-rule" + description = "Event rule for AWS Inspector findings" + + event_pattern = jsonencode({ + source = ["aws.inspector2"], + detail-type = ["Inspector2 Finding"], + detail = { + finding = { + severity = ["HIGH", "CRITICAL"], + } + } + }) +} + +resource "aws_cloudwatch_event_target" "inspector_event_target" { + rule = aws_cloudwatch_event_rule.inspector_event_rule.name + arn = data.terraform_remote_state.lambda.outputs.lambda_function_arn +} + +resource "aws_lambda_permission" "inspector_event_permission" { + statement_id = "AllowExecutionFromEventBridge" + action = "lambda:InvokeFunction" + function_name = data.terraform_remote_state.lambda.outputs.lambda_function_name + principal = "events.amazonaws.com" + source_arn = aws_cloudwatch_event_rule.inspector_event_rule.arn +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/iam/backend.tf b/operation-team-account/runtime-verification/iam/backend.tf new file mode 100644 index 
00000000..8b820ee8 --- /dev/null +++ b/operation-team-account/runtime-verification/iam/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "cloudfence-operation-state" + key = "runtime-verification/iam.tfstate" + region = "ap-northeast-2" + dynamodb_table = "s3-operation-lock" + encrypt = true + } +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/iam/main.tf b/operation-team-account/runtime-verification/iam/main.tf new file mode 100644 index 00000000..d6431474 --- /dev/null +++ b/operation-team-account/runtime-verification/iam/main.tf @@ -0,0 +1,33 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + +} + +provider "aws" { + region = "ap-northeast-2" +} + +resource "aws_iam_role" "lambda_exec_role" { + name = "${var.project_name}-inspector-lambda-exec-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17", + Statement = [{ + Action = "sts:AssumeRole", + Effect = "Allow", + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "lambda_logs" { + role = aws_iam_role.lambda_exec_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/iam/outputs.tf b/operation-team-account/runtime-verification/iam/outputs.tf new file mode 100644 index 00000000..a662f1bb --- /dev/null +++ b/operation-team-account/runtime-verification/iam/outputs.tf @@ -0,0 +1,4 @@ +output "lambda_exec_role_arn" { + description = "The ARN of the IAM role for the Lambda function" + value = aws_iam_role.lambda_exec_role.arn +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/iam/variables.tf b/operation-team-account/runtime-verification/iam/variables.tf new file mode 100644 index 00000000..155f3783 --- /dev/null +++ 
b/operation-team-account/runtime-verification/iam/variables.tf @@ -0,0 +1,5 @@ +variable "project_name" { + description = "The name of the project" + type = string + default = "cloudfence" +} diff --git a/operation-team-account/runtime-verification/inspector/backend.tf b/operation-team-account/runtime-verification/inspector/backend.tf new file mode 100644 index 00000000..cb20378c --- /dev/null +++ b/operation-team-account/runtime-verification/inspector/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "cloudfence-operation-state" + key = "runtime-verification/inspector.tfstate" + region = "ap-northeast-2" + dynamodb_table = "s3-operation-lock" + encrypt = true + } +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/inspector/main.tf b/operation-team-account/runtime-verification/inspector/main.tf new file mode 100644 index 00000000..cb6f1ffa --- /dev/null +++ b/operation-team-account/runtime-verification/inspector/main.tf @@ -0,0 +1,33 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + +} + +provider "aws" { + region = "ap-northeast-2" +} + +provider "aws" { + alias = "prod" + region = "ap-northeast-2" +} + +data "aws_caller_identity" "prod" { + provider = aws.prod +} + +data "aws_caller_identity" "current" {} + +resource "aws_inspector2_enabler" "this" { + account_ids = [data.aws_caller_identity.current.account_id] + resource_types = ["EC2"] +} + +resource "aws_inspector2_delegated_admin_account" "prod_account" { + account_id = data.aws_caller_identity.prod.account_id +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/inspector/variables.tf b/operation-team-account/runtime-verification/inspector/variables.tf new file mode 100644 index 00000000..c5d3e883 --- /dev/null +++ b/operation-team-account/runtime-verification/inspector/variables.tf @@ -0,0 +1,4 @@ +variable "prod_account_id" { + description = "prod-team-account" + 
type = string +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/lambda/backend.tf b/operation-team-account/runtime-verification/lambda/backend.tf new file mode 100644 index 00000000..59c89a02 --- /dev/null +++ b/operation-team-account/runtime-verification/lambda/backend.tf @@ -0,0 +1,9 @@ +terraform { + backend "s3" { + bucket = "cloudfence-operation-state" + key = "runtime-verification/lambda.tfstate" + region = "ap-northeast-2" + dynamodb_table = "s3-operation-lock" + encrypt = true + } +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/lambda/lambda_function.py b/operation-team-account/runtime-verification/lambda/lambda_function.py new file mode 100644 index 00000000..add59de3 --- /dev/null +++ b/operation-team-account/runtime-verification/lambda/lambda_function.py @@ -0,0 +1,44 @@ +import json +import os +import urllib.request + +SLACK_WEBHOOK_URL = os.environ.get('SLACK_WEBHOOK_URL', '') + +def lambda_handler(event, context): + print(json.dumps(event)) # 디버깅을 위해 전체 이벤트를 로그로 남깁니다. 
+ + # Inspector 이벤트에서 필요한 정보 추출 + finding = event.get('detail', {}) + title = finding.get('title', 'N/A') + severity = finding.get('severity', 'N/A') + + resources = finding.get('resources', [{}]) + resource_id = resources[0].get('id', 'N/A') + + region = event.get('region', 'N/A') + finding_arn = finding.get('findingArn', '') + console_url = f"https://{region}.console.aws.amazon.com/inspector/v2/findings/details?finding-arn={finding_arn}" if finding_arn else "#" + + # 슬랙 메시지 구성 + slack_message = { + "text": f"*New High-Severity Inspector Finding*", + "blocks": [ + { "type": "header", "text": { "type": "plain_text", "text": f"{severity}: {title}" } }, + { "type": "section", "text": { "type": "mrkdwn", "text": f"*Affected Resource:*\n```{resource_id}```" } }, + { "type": "actions", "elements": [{ "type": "button", "text": { "type": "plain_text", "text": "View Finding Details" }, "url": console_url, "style": "primary" }] } + ] + } + + if not SLACK_WEBHOOK_URL: + print("Slack Webhook URL is not set.") + return {'statusCode': 500} + + req = urllib.request.Request(SLACK_WEBHOOK_URL, data=json.dumps(slack_message).encode('utf-8'), headers={'Content-Type': 'application/json'}) + + try: + with urllib.request.urlopen(req) as response: + print(f"Message posted to Slack, status: {response.status}") + except urllib.error.HTTPError as e: + print(f"Error posting to Slack: {e.code} {e.reason}") + + return {'statusCode': 200} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/lambda/lambda_function_payload.zip b/operation-team-account/runtime-verification/lambda/lambda_function_payload.zip new file mode 100644 index 00000000..4f0cdaba Binary files /dev/null and b/operation-team-account/runtime-verification/lambda/lambda_function_payload.zip differ diff --git a/operation-team-account/runtime-verification/lambda/main.tf b/operation-team-account/runtime-verification/lambda/main.tf new file mode 100644 index 00000000..1dcc6692 --- /dev/null +++ 
b/operation-team-account/runtime-verification/lambda/main.tf @@ -0,0 +1,38 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + +} + +provider "aws" { + region = "ap-northeast-2" +} + +data "terraform_remote_state" "iam" { + backend = "s3" + config = { + bucket = "cloudfence-operation-state" + key = "runtime-verification/iam.tfstate" + region = "ap-northeast-2" + dynamodb_table = "s3-operation-lock" + } +} + +resource "aws_lambda_function" "inspector_slack_notification" { + function_name = "inspector_slack_notification" + role = data.terraform_remote_state.iam.outputs.lambda_exec_role_arn + handler = "index.lambda_handler" + runtime = "python3.9" + + environment { + variables = { + SLACK_WEBHOOK_URL = var.slack_webhook_url + } + } + filename = "lambda_function_payload.zip" + source_code_hash = filebase64sha256("lambda_function_payload.zip") +} diff --git a/operation-team-account/runtime-verification/lambda/outputs.tf b/operation-team-account/runtime-verification/lambda/outputs.tf new file mode 100644 index 00000000..b6b761dd --- /dev/null +++ b/operation-team-account/runtime-verification/lambda/outputs.tf @@ -0,0 +1,9 @@ +output "lambda_function_arn" { + description = "The ARN of the Inspector Slack notification Lambda function" + value = aws_lambda_function.inspector_slack_notification.arn +} + +output "lambda_function_name" { + description = "The name of the Inspector Slack notification Lambda function" + value = aws_lambda_function.inspector_slack_notification.function_name +} \ No newline at end of file diff --git a/operation-team-account/runtime-verification/lambda/variables.tf b/operation-team-account/runtime-verification/lambda/variables.tf new file mode 100644 index 00000000..5ecd0a22 --- /dev/null +++ b/operation-team-account/runtime-verification/lambda/variables.tf @@ -0,0 +1,5 @@ +variable "slack_webhook_url" { + description = "The incoming webhook URL for Slack notifications" + type = string + 
sensitive = true +} \ No newline at end of file