diff --git a/.gitignore b/.gitignore
index 899591b..640b582 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 # Compiled files
 *.tfstate
 *.tfstate.backup
+*.bak
 
 # Module directory
 .terraform/
@@ -9,3 +10,4 @@
 environments/*
 !environments/runnable-on-prem.example.tfvars
+step-2-kops
diff --git a/README.md b/README.md
index e102c4c..1bc2322 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,121 @@
 # Runnable On-Prem Terraform
 
+### Dependencies
+
+```
+brew install terraform kops jq kubectl
+```
+
+[How to install homebrew](https://brew.sh/).
+
+### Step 1: Obtaining AWS Access Tokens
+
+1. https://console.aws.amazon.com/iam/home?region=us-east-2#/security_credential
+2. Click create new access key
+
 ```
+# TODO: Define permissions
 export AWS_ACCESS_KEY_ID=
 export AWS_SECRET_ACCESS_KEY=
-terraform get
-terraform plan
-terraform apply
+
+```
+[How to get access tokens](http://docs.aws.amazon.com/lambda/latest/dg/getting-started.html)
+
+### Step 2: Populating Variables
+
+Populate `environments/main.tfvars` with the correct variables.
+
+The following are the only required variables:
+
+```
+# Domain to be used by Runnable.
+# Access to setting DNS nameservers is required.
+# Multiple subdomains must be set for this domain
+domain = "runnable.com"
+# A Github organization ID (see below for obtaining the ID)
+github_org_id = "2828361" # Github ID for organization
+# Location of previously generated configuration
+# Should be generated using github.com/CodeNow/on-prem-devops-scripts
+lc_user_data_file_location = "~/dock-runnable-on-prem.sh" # File must already be generated
+# Path to a public key (see below for generating a public key)
+public_key_path = "~/.ssh/*.pub" # A public key
+```
+
+##### Obtaining A Github ID
+
+```
+curl -sS "https://api.github.com/orgs/${ORGNAME}" | jq '.id'
+```
+
+##### Obtaining A Public Key From A Private Key
+
+```
+ssh-keygen -y -f ~/.ssh/${NAME}.pem >> ~/.ssh/${NAME}.pem.pub
+```
+
+##### Creating a New Public Key
+```
+openssl req -newkey rsa:2048 -new -nodes -keyout key.pem
+chmod 400 key.pem
+ssh-keygen -y -f key.pem >> key.pem.pub
+```
+
+### Step 3: Init Terraform and Apply First Part
+
+```
+terraform init
+# A bug in Terraform requires explicitly targeting submodules: https://github.com/hashicorp/terraform/issues/5190
+terraform apply -target=module.step_1.module.key_pair -target=module.step_1.module.vpc -target=module.step_1.module.route53 -target=module.step_1.module.s3 -var-file="environments/main.tfvars"
+```
+
+### Step 4: Update DNS
+
+The output of the command above includes a DNS entry section; update your DNS to match these records. There should be 4 entries. The DNS nameservers need to propagate before you go on to the next step.
+
+### Step 5: Create Kops configuration
+
+[kops](https://github.com/kubernetes/kops) is a tool to automatically spin up Kubernetes clusters. The script below uses it to generate the Terraform configuration for the cluster:
+
+```
+bash create-k8-cluster.bash environments/main.tfvars
+```
+
+### Step 6: Apply configuration
+
+Finally, it's time to create the infrastructure. This includes the Kubernetes cluster, the auto scaling group for the dock workers, and the RDS database.
+
+If you want to review the resources to be created, first run `terraform plan -var-file="environments/main.tfvars"`.
+
+When you're ready to apply changes, just run
+
+```
+terraform apply -var-file="environments/main.tfvars"
+```
+
+### Step 7: Confirm Cluster is Up
+
+After finishing the setup, you can test whether the cluster is up by running the following command (this can take a few minutes).
+
+```
+kubectl get nodes
+```
+
+You should see something like this. It will take some time for the nodes to appear as "Ready":
+
+```
+$ kubectl get nodes
+NAME                                         STATUS         AGE   VERSION
+ip-10-10-34-129.us-west-2.compute.internal   Ready,master   1h    v1.5.7
+ip-10-10-57-73.us-west-2.compute.internal    Ready          1h    v1.5.7
+ip-10-10-61-76.us-west-2.compute.internal    Ready          1h    v1.5.7
+```
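+
+If `kubectl` cannot reach the cluster yet, the kubeconfig may not have been exported locally. A minimal sketch, assuming the Terraform outputs defined in `main.tf` and the same kops state bucket used by `create-k8-cluster.bash`, for exporting the kubeconfig and validating the cluster with kops:
+
+```
+# Point kops at the state bucket created in Step 3 (value comes from `terraform output`)
+export KOPS_STATE_STORE="s3://$(terraform output kops_config_bucket)"
+# Write the kubeconfig for the cluster, then validate that masters and nodes are up
+kops export kubecfg --name="$(terraform output cluster_name)"
+kops validate cluster --name="$(terraform output cluster_name)"
+```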
+
+### Step 8: Add dashboard
+
+After the cluster is ready, run the following command to deploy the dashboard:
+
+```
+kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.6.0.yaml
+```
+
+Then, run `kubectl proxy` and go to [`127.0.0.1:8001/ui/`](http://127.0.0.1:8001/ui) to test it.
diff --git a/create-k8-cluster.bash b/create-k8-cluster.bash
new file mode 100644
index 0000000..19f474c
--- /dev/null
+++ b/create-k8-cluster.bash
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# We need to run a refresh before we can run `terraform output`
+terraform refresh -var-file=$1 > /dev/null
+
+JSON=$(terraform output -json)
+REGION=$(echo $JSON | jq --raw-output '.aws_region.value')
+ENV=$(echo $JSON | jq --raw-output '.environment.value')
+VPC_ID=$(echo $JSON | jq --raw-output '.vpc_id.value')
+BUCKET_NAME=$(echo $JSON | jq --raw-output '.kops_config_bucket.value')
+CLUSTER_NAME=$(echo $JSON | jq --raw-output '.cluster_name.value')
+SSH_PUBLIC_KEY_PATH=$(echo $JSON | jq --raw-output '.ssh_public_key_path.value')
+
+echo "Creating cluster in VPC $VPC_ID with name $CLUSTER_NAME"
+
+kops create cluster \
+  --zones="${REGION}a" \
+  --name=${CLUSTER_NAME} \
+  --vpc=${VPC_ID} \
+  --node-count=3 \
+  --cloud=aws \
+  --cloud-labels="Environment=${ENV}" \
+  --ssh-public-key=${SSH_PUBLIC_KEY_PATH} \
+  --state=s3://${BUCKET_NAME} \
+  --node-size=m4.large \
+  --master-size=m4.large \
+  --out=./step-2-kops --target=terraform
+
+# Move the file in order for it to be a valid module
+mv ./step-2-kops/kubernetes.tf ./step-2-kops/main.tf
diff --git a/database/main.tf b/database/main.tf
deleted file mode 100644
index 748389d..0000000
--- a/database/main.tf
+++ /dev/null
@@ -1,35 +0,0 @@
-variable "environment" {}
-variable "username" {}
-variable "password" {}
-variable "port" {}
-variable "subnet_group_name" {}
-variable "vpc_id" {}
-variable "main_host_security_group_id" {}
-variable "instance_class" {}
-
-resource "aws_security_group" "database_sg" {
-  name        = "${var.environment}-database-sg"
-  description = "Allow inbound traffic from main host to DB port"
-  vpc_id      = "${var.vpc_id}"
-
-  ingress {
-    from_port       = 5432
-    to_port         = 5432
-    protocol        = "tcp"
-    security_groups = ["${var.main_host_security_group_id}"]
-  }
-}
-
-resource "aws_db_instance" "main_postgres_db" {
-  allocated_storage      = 10
-  engine                 = "postgres"
-  engine_version         = "9.5.2"
-  instance_class         = "${var.instance_class}"
-  name                   = "big_poppa"
-  username               = "${var.username}"
-  password               = "${var.password}"
-  port                   = "${var.port}"
-  db_subnet_group_name   = "${var.subnet_group_name}"
-  vpc_security_group_ids = ["${aws_security_group.database_sg.id}"]
-  skip_final_snapshot    = true
-}
diff --git a/environments/runnable-on-prem.example.tfvars b/environments/runnable-on-prem.example.tfvars
index 8aea5f0..fb59ae6 100644
--- a/environments/runnable-on-prem.example.tfvars
+++ b/environments/runnable-on-prem.example.tfvars
@@ -1,15 +1,9 @@
 # All variables in this document should match
 domain = ""
+github_org_id = ""
+public_key = ""
 db_username = "" # Must start with a letter
 db_password = ""
-db_subnet_group_name = ""
-main_host_vpc_id = ""
-main_host_subnet_id = ""
-main_host_private_ip = "10.4.0.100"
-dock_subnet_id = ""
-github_org_id = ""
-key_name = "" lc_user_data_file_location = "~/dock-runnable-on-prem.sh" -bastion_sg_id = "" environment = "runnable-on-prem" aws_region = "us-west-2" diff --git a/instances-and-security-groups/main.tf b/instances-and-security-groups/main.tf deleted file mode 100644 index f3cbd78..0000000 --- a/instances-and-security-groups/main.tf +++ /dev/null @@ -1,185 +0,0 @@ -variable "environment" {} -variable "vpc_id" {} -variable "main_host_subnet_id" {} -variable "main_host_instance_type" {} -variable "dock_subnet_id" {} -variable "dock_instance_type" {} -variable "private_ip" {} -variable "github_org_id" {} -variable "lc_user_data_file_location" {} -variable "key_name" {} -variable "bastion_sg_id" {} - -# Changing AMI forces new resource and will delete all everything in main host -# Ovewrite this variable with previous AMI if update is pushed -variable "main_host_ami" { - default = "ami-5fa7353f" # singe-host-ami-build-v0.0.4 -} - -variable "dock_ami" { - default = "ami-557dee35" # dock-ami-build-v.0.0.8 -} - -resource "aws_security_group" "main_host_sg" { - name = "${var.environment}-main-host-sg" - description = "Allow all inbound traffic on all traffic over port 80" - vpc_id = "${var.vpc_id}" - - ingress { - from_port = 80 - to_port = 65535 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - security_groups = ["${var.bastion_sg_id}"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_security_group" "dock_sg" { - name = "${var.environment}-dock-sg" - description = "Allow all traffic from main host and between docks" - vpc_id = "${var.vpc_id}" - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - security_groups = ["${var.bastion_sg_id}"] - } - - ingress { - from_port = 32768 - to_port = 65535 - protocol = "tcp" - security_groups = ["${aws_security_group.main_host_sg.id}"] - } - - ingress { - from_port = 8200 - to_port = 8201 - protocol = "tcp" - security_groups = ["${aws_security_group.main_host_sg.id}"] - } - - ingress { - from_port = 4242 - to_port = 4242 - protocol = "tcp" - security_groups = ["${aws_security_group.main_host_sg.id}"] - } - - ingress { - from_port = 29006 - to_port = 29007 - protocol = "tcp" - security_groups = ["${aws_security_group.main_host_sg.id}"] - } - - ingress { - from_port = 3100 - to_port = 3100 - protocol = "tcp" - security_groups = ["${aws_security_group.main_host_sg.id}"] - } - - ingress { - from_port = 6783 - to_port = 6783 - protocol = "tcp" - self = true - } - - ingress { - from_port = 6783 - to_port = 6783 - protocol = "udp" - self = true - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_instance" "main-instance" { - ami = "${var.main_host_ami}" - instance_type = "${var.main_host_instance_type}" - associate_public_ip_address = true - private_ip = "${var.private_ip}" - vpc_security_group_ids = ["${aws_security_group.main_host_sg.id}"] - subnet_id = "${var.main_host_subnet_id}" - key_name = "${var.key_name}" - - tags { - Name = "${var.environment}-main" - } -} - -resource "aws_launch_configuration" "dock_lc" { - name_prefix = "${var.environment}-dock-lc-" - image_id = "${var.dock_ami}" - instance_type = "${var.dock_instance_type}" - user_data = "${file("${var.lc_user_data_file_location}")}" - key_name = "${var.key_name}" - security_groups = ["${aws_security_group.dock_sg.id}"] - - root_block_device { - volume_size = 10 - } - - ebs_block_device { 
- device_name = "/dev/sdb" - snapshot_id = "snap-c77705e9" - volume_size = 50 - } - - lifecycle { - create_before_destroy = true - } -} - -resource "aws_autoscaling_group" "dock-auto-scaling-group" { - name = "asg-${var.environment}-${var.github_org_id}" - max_size = 30 - min_size = 2 - health_check_grace_period = 300 - health_check_type = "EC2" - desired_capacity = 2 # Start off with 0 and increase manually when main host is running - vpc_zone_identifier = ["${var.dock_subnet_id}"] - launch_configuration = "${aws_launch_configuration.dock_lc.name}" - - lifecycle { - create_before_destroy = true - } - - tag { - key = "org" - value = "${var.github_org_id}" - propagate_at_launch = true - } - - tag { - key = "enviroment" - value = "${var.environment}" - propagate_at_launch = true - } -} - -output "main_security_group_id" { - value = "${aws_security_group.main_host_sg.id}" -} diff --git a/main.tf b/main.tf index 843623f..30db3db 100644 --- a/main.tf +++ b/main.tf @@ -1,37 +1,151 @@ +terraform { + backend "s3" {} +} + provider "aws" { region = "${var.aws_region}" } -module "s3" { - source = "./s3" - domain = "${var.domain}" - environment = "${var.environment}" - force_destroy = "${var.force_destroy_s3_buckets}" +module "step_1" { + source = "./step-1" + environment = "${var.environment}" + public_key_path = "${var.public_key_path}" + aws_region = "${var.aws_region}" + domain = "${var.domain}" + force_destroy_s3_buckets = "${var.force_destroy_s3_buckets}" +} + +module "step_2_kops" { + source = "./step-2-kops/" } -module "instances-and-security-groups" { - source = "./instances-and-security-groups" +module "security_groups" { + source = "./modules/security-groups" + environment = "${var.environment}" + vpc_id = "${module.step_1.main_vpc_id}" + cluster_sg_ids = "${concat(module.step_2_kops.node_security_group_ids, module.step_2_kops.master_security_group_ids)}" +} + +module "subnets" { + source = "./modules/subnets" + environment = "${var.environment}" + region = "${var.aws_region}" + vpc_id = "${module.step_1.main_vpc_id}" + cluster_subnet_id = "${module.step_2_kops.node_subnet_ids[0]}" # Currently only handle one subnet for cluster +} + +module "bastion" { + source = "./modules/bastion" + environment = "${var.environment}" + sg_id = "${module.security_groups.bastion_sg_id}" + subnet_id = "${module.subnets.cluster_subnet_id}" + key_name = "${module.step_1.key_pair_name}" +} + +module "nat-gateway" { + source = "./modules/nat-gateway" + environment = "${var.environment}" + vpc_id = "${module.step_1.main_vpc_id}" + subnet_id = "${module.subnets.cluster_subnet_id}" +} + +module "routing-tables" { + source = "./modules/routing-tables" + environment = "${var.environment}" + dock_nat_id = "${module.nat-gateway.dock_nat_gateway_id}" + vpc_id = "${module.step_1.main_vpc_id}" +} + +module "instances" { + source = "./modules/instances" environment = "${var.environment}" - vpc_id = "${var.main_host_vpc_id}" - main_host_subnet_id = "${var.main_host_subnet_id}" - dock_subnet_id = "${var.dock_subnet_id}" - private_ip = "${var.main_host_private_ip}" + dock_subnet_id = "${module.subnets.dock_subnet_id}" github_org_id = "${var.github_org_id}" lc_user_data_file_location = "${var.lc_user_data_file_location}" - key_name = "${var.key_name}" - bastion_sg_id = "${var.bastion_sg_id}" - main_host_instance_type = "${var.main_host_instance_type}" + key_name = "${module.step_1.key_pair_name}" dock_instance_type = "${var.dock_instance_type}" + dock_sg_id = "${module.security_groups.dock_sg_id}" } module "database" { - 
source = "./database" - environment = "${var.environment}" - username = "${var.db_username}" - password = "${var.db_password}" - port = "${var.db_port}" - subnet_group_name = "${var.db_subnet_group_name}" - main_host_security_group_id = "${module.instances-and-security-groups.main_security_group_id}" - vpc_id = "${var.main_host_vpc_id}" - instance_class = "${var.db_instance_class}" + source = "./modules/database" + environment = "${var.environment}" + port = "${var.db_port}" + subnet_group_name = "${module.subnets.database_subnet_group_name}" + security_group_id = "${module.security_groups.db_sg_id}" + instance_class = "${var.db_instance_class}" +} + +output "environment" { + value = "${var.environment}" +} + +output "vpc_id" { + value = "${module.step_1.main_vpc_id}" +} + +output "cluster_subnet_id" { + value = "${module.subnets.cluster_subnet_id}" +} + +output "dock_subnet_id" { + value = "${module.subnets.dock_subnet_id}" +} + +output "database_subnet_group_name" { + value = "${module.subnets.database_subnet_group_name}" +} + +output "key_pair_name" { + value = "${module.step_1.key_pair_name}" +} + +output "aws_region" { + value = "${var.aws_region}" +} + +output "postgres_user" { + value = "${module.database.username}" +} + +output "postgres_password" { + value = "${module.database.password}" + sensitive = true +} + +output "postgres_host" { + value = "${module.database.host}" + sensitive = true +} + +output "dns_nameservers" { + value = "${module.step_1.dns_nameservers}" +} + +output "main_host_private_ip" { + value = "${var.main_host_private_ip}" +} + +output "kops_config_bucket" { + value = "${module.step_1.kops_config_bucket}" +} + +output "dock_subnet_cidr" { + value = "${module.subnets.dock_subnet_cidr}" +} + +output "cluster_name" { + value = "${module.step_1.cluster_name}" +} + +output "ssh_public_key_path" { + value = "${var.public_key_path}" +} + +output "bastion_ip_address" { + value = "${module.bastion.ip_address}" +} + +output "kube_cluster_sg_ids" { + value = "${concat(module.step_2_kops.node_security_group_ids, module.step_2_kops.master_security_group_ids)}" } diff --git a/modules/bastion/main.tf b/modules/bastion/main.tf new file mode 100644 index 0000000..ec6ccaf --- /dev/null +++ b/modules/bastion/main.tf @@ -0,0 +1,23 @@ +variable "environment" {} +variable "subnet_id" {} +variable "key_name" {} +variable "sg_id" {} + +resource "aws_instance" "bastion_instance" { + ami = "ami-5189a661" + instance_type = "t2.micro" + vpc_security_group_ids = ["${var.sg_id}"] + subnet_id = "${var.subnet_id}" + key_name = "${var.key_name}" + + associate_public_ip_address = true + source_dest_check = false + + tags { + Name = "${var.environment}-bastion" + } +} + +output "ip_address" { + value = "${aws_instance.bastion_instance.private_ip}" +} diff --git a/modules/database/main.tf b/modules/database/main.tf new file mode 100644 index 0000000..bbf56dc --- /dev/null +++ b/modules/database/main.tf @@ -0,0 +1,38 @@ +variable "environment" {} +variable "username" { + default = "runnable" +} +variable "port" {} +variable "subnet_group_name" {} +variable "security_group_id" {} +variable "instance_class" {} + +resource "random_id" "password" { + byte_length = "20" +} + +resource "aws_db_instance" "main_postgres_db" { + allocated_storage = 10 + engine = "postgres" + engine_version = "9.5.2" + instance_class = "${var.instance_class}" + name = "big_poppa" + username = "${var.username}" + password = "${random_id.password.b64}" + port = "${var.port}" + db_subnet_group_name = 
"${var.subnet_group_name}" + vpc_security_group_ids = ["${var.security_group_id}"] + skip_final_snapshot = true +} + +output "username" { + value = "${var.username}" +} + +output "password" { + value = "${random_id.password.b64}" +} + +output "host" { + value = "${aws_db_instance.main_postgres_db.address}" +} diff --git a/modules/instances/main.tf b/modules/instances/main.tf new file mode 100644 index 0000000..52b8e86 --- /dev/null +++ b/modules/instances/main.tf @@ -0,0 +1,66 @@ +variable "environment" {} +variable "dock_subnet_id" {} +variable "dock_instance_type" {} +variable "github_org_id" {} +variable "lc_user_data_file_location" {} +variable "key_name" {} +variable "dock_sg_id" {} + +# Changing AMI forces new resource and will delete all everything in main host +# Ovewrite this variable with previous AMI if update is pushed +variable "main_host_ami" { + default = "ami-5fa7353f" # singe-host-ami-build-v0.0.4 +} + +variable "dock_ami" { + default = "ami-557dee35" # dock-ami-build-v.0.0.8 +} + +resource "aws_launch_configuration" "dock_lc" { + name_prefix = "${var.environment}-dock-lc-" + image_id = "${var.dock_ami}" + instance_type = "${var.dock_instance_type}" + user_data = "${file("${var.lc_user_data_file_location}")}" + key_name = "${var.key_name}" + security_groups = ["${var.dock_sg_id}"] + + root_block_device { + volume_size = 10 + } + + ebs_block_device { + device_name = "/dev/sdb" + snapshot_id = "snap-c77705e9" + volume_size = 50 + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_autoscaling_group" "dock_auto_scaling_group" { + name = "asg-${var.environment}-${var.github_org_id}" + max_size = 30 + min_size = 0 + health_check_grace_period = 300 + health_check_type = "EC2" + vpc_zone_identifier = ["${var.dock_subnet_id}"] + launch_configuration = "${aws_launch_configuration.dock_lc.name}" + + lifecycle { + create_before_destroy = true + } + + tag { + key = "org" + value = "${var.github_org_id}" + propagate_at_launch = true + } + + tag { + key = "enviroment" + value = "${var.environment}" + propagate_at_launch = true + } +} diff --git a/modules/keypair/main.tf b/modules/keypair/main.tf new file mode 100644 index 0000000..a6a56ef --- /dev/null +++ b/modules/keypair/main.tf @@ -0,0 +1,11 @@ +variable "public_key_path" {} +variable "environment" {} + +resource "aws_key_pair" "main_key" { + key_name = "${var.environment}-key-pair" + public_key = "${file("${var.public_key_path}")}" +} + +output "key_pair_name" { + value = "${aws_key_pair.main_key.key_name}" +} diff --git a/modules/nat-gateway/main.tf b/modules/nat-gateway/main.tf new file mode 100644 index 0000000..e4c56b5 --- /dev/null +++ b/modules/nat-gateway/main.tf @@ -0,0 +1,20 @@ +variable "environment" {} +variable "vpc_id" {} +variable "subnet_id" {} + +resource "aws_eip" "dock_nat_eip" { + vpc = true +} + +resource "aws_nat_gateway" "dock_nat" { + allocation_id = "${aws_eip.dock_nat_eip.id}" + subnet_id = "${var.subnet_id}" +} + +output "dock_nat_eip" { + value = "${aws_eip.dock_nat_eip.public_ip}" +} + +output "dock_nat_gateway_id" { + value = "${aws_nat_gateway.dock_nat.id}" +} diff --git a/modules/route53/main.tf b/modules/route53/main.tf new file mode 100644 index 0000000..6b4dcfc --- /dev/null +++ b/modules/route53/main.tf @@ -0,0 +1,39 @@ +variable "environment" {} +variable "domain" {} +variable "force_destroy" {} + +resource "aws_route53_zone" "main" { + name = "kubernetes.${var.domain}" + force_destroy = "${var.force_destroy}" + + tags { + Environment = "${var.environment}" + } +} + +resource 
"aws_route53_record" "main-ns" { + zone_id = "${aws_route53_zone.main.zone_id}" + name = "kubernetes.${var.domain}" + type = "NS" + ttl = "30" + + records = [ + "${aws_route53_zone.main.name_servers.0}", + "${aws_route53_zone.main.name_servers.1}", + "${aws_route53_zone.main.name_servers.2}", + "${aws_route53_zone.main.name_servers.3}", + ] +} + +output "nameservers" { + value = [ + "${aws_route53_zone.main.name_servers.0}", + "${aws_route53_zone.main.name_servers.1}", + "${aws_route53_zone.main.name_servers.2}", + "${aws_route53_zone.main.name_servers.3}", + ] +} + +output "cluster_name" { + value = "${aws_route53_zone.main.name}" +} diff --git a/modules/routing-tables/main.tf b/modules/routing-tables/main.tf new file mode 100644 index 0000000..e1e66cb --- /dev/null +++ b/modules/routing-tables/main.tf @@ -0,0 +1,23 @@ +variable "vpc_id" {} +variable "dock_nat_id" {} +variable "environment" {} + +resource "aws_route_table" "docks_route_table" { + vpc_id = "${var.vpc_id}" + + tags { + Name = "Docks route table" + Environment = "${var.environment}" + } +} + +resource "aws_route" "private_route" { + route_table_id = "${aws_route_table.docks_route_table.id}" + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = "${var.dock_nat_id}" +} + +resource "aws_main_route_table_association" "docks_route_table" { + vpc_id = "${var.vpc_id}" + route_table_id = "${aws_route_table.docks_route_table.id}" +} diff --git a/s3/main.tf b/modules/s3/main.tf similarity index 85% rename from s3/main.tf rename to modules/s3/main.tf index 2c2411b..b70ed65 100644 --- a/s3/main.tf +++ b/modules/s3/main.tf @@ -85,3 +85,20 @@ resource "aws_s3_bucket" "registry" { Description = "Bucket to service as the registry backend" } } + +resource "aws_s3_bucket" "kops_config" { + bucket = "runnable.kops-config.${var.environment}" + force_destroy = "${var.force_destroy}" + + tags { + Description = "Bucket to store kops configuration" + } + + versioning { + enabled = true + } +} + +output "kops_config_bucket" { + value = "${aws_s3_bucket.kops_config.bucket}" +} diff --git a/modules/security-groups/main.tf b/modules/security-groups/main.tf new file mode 100644 index 0000000..37295f7 --- /dev/null +++ b/modules/security-groups/main.tf @@ -0,0 +1,122 @@ +variable "environment" {} +variable "vpc_id" {} +variable "cluster_sg_ids" { type = "list" } + +resource "aws_security_group" "bastion_sg" { + name = "${var.environment}-bastion-sg" + description = "Allow ssh access through this box" + vpc_id = "${var.vpc_id}" + + ingress = { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + self = false + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_security_group" "dock_sg" { + name = "${var.environment}-dock-sg" + description = "Allow all traffic from kube cluster host and between docks" + vpc_id = "${var.vpc_id}" + + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + security_groups = ["${aws_security_group.bastion_sg.id}"] + } + + ingress { + from_port = 32768 + to_port = 65535 + protocol = "tcp" + security_groups = ["${var.cluster_sg_ids}"] + } + + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + security_groups = ["${var.cluster_sg_ids}"] + } + + ingress { + from_port = 4242 + to_port = 4242 + protocol = "tcp" + security_groups = ["${var.cluster_sg_ids}"] + } + + ingress { + from_port = 29006 + to_port = 29007 + protocol = "tcp" + security_groups = ["${var.cluster_sg_ids}"] + } + + ingress { + from_port = 3100 
+ to_port = 3100 + protocol = "tcp" + security_groups = ["${var.cluster_sg_ids}"] + } + + ingress { + from_port = 6783 + to_port = 6783 + protocol = "tcp" + self = true + } + + ingress { + from_port = 6783 + to_port = 6783 + protocol = "udp" + self = true + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_security_group" "database_sg" { + name = "${var.environment}-database-sg" + description = "Allow inbound traffic from kube cluster to DB port" + vpc_id = "${var.vpc_id}" + + ingress { + from_port = 5432 + to_port = 5432 + protocol = "tcp" + security_groups = ["${var.cluster_sg_ids}"] + } +} + +output "bastion_sg_id" { + value = "${aws_security_group.bastion_sg.id}" +} + +output "db_sg_id" { + value = "${aws_security_group.database_sg.id}" +} + +output "cluster_sg_ids" { + value = "${var.cluster_sg_ids}" +} + +output "dock_sg_id" { + value = "${aws_security_group.dock_sg.id}" +} diff --git a/modules/subnets/main.tf b/modules/subnets/main.tf new file mode 100644 index 0000000..b024547 --- /dev/null +++ b/modules/subnets/main.tf @@ -0,0 +1,46 @@ +variable "environment" {} +variable "vpc_id" {} +variable "region" {} +variable "cluster_subnet_id" {} + +resource "aws_subnet" "dock_subnet" { + vpc_id = "${var.vpc_id}" + cidr_block = "10.10.2.0/24" + availability_zone = "${var.region}b" + + tags { + Name = "${var.environment}-dock-subnet" + Environment = "${var.environment}" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_db_subnet_group" "database_subnet_group" { + name = "${var.environment}-database-subnet-group" + # NOTE: What subnets should this have? + subnet_ids = ["${var.cluster_subnet_id}", "${aws_subnet.dock_subnet.id}"] + + tags { + Name = "${var.environment}-database-subnet-group" + Environment = "${var.environment}" + } +} + +output "cluster_subnet_id" { + value = "${var.cluster_subnet_id}" +} + +output "dock_subnet_id" { + value = "${aws_subnet.dock_subnet.id}" +} + +output "dock_subnet_cidr" { + value = "${aws_subnet.dock_subnet.cidr_block}" +} + +output "database_subnet_group_name" { + value = "${aws_db_subnet_group.database_subnet_group.name}" +} diff --git a/modules/vpc/main.tf b/modules/vpc/main.tf new file mode 100644 index 0000000..15274a1 --- /dev/null +++ b/modules/vpc/main.tf @@ -0,0 +1,44 @@ +variable "environment" {} + +resource "aws_vpc" "main" { + cidr_block = "10.10.0.0/16" + enable_dns_hostnames = true + + tags { + Name = "${var.environment}-main" + } +} + +resource "aws_internet_gateway" "main" { + vpc_id = "${aws_vpc.main.id}" + + tags { + Name = "${var.environment}-main-ig" + } +} + +resource "aws_route_table" "main" { + vpc_id = "${aws_vpc.main.id}" + + route { + cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.main.id}" + } + + tags { + Name = "${var.environment}-main-route-table" + } +} + +resource "aws_main_route_table_association" "main_route_table" { + vpc_id = "${aws_vpc.main.id}" + route_table_id = "${aws_route_table.main.id}" +} + +output "main_vpc_id" { + value = "${aws_vpc.main.id}" +} + +output "public_route_table_id" { + value = "${aws_route_table.main.id}" +} diff --git a/step-1/main.tf b/step-1/main.tf new file mode 100644 index 0000000..ebe8a0d --- /dev/null +++ b/step-1/main.tf @@ -0,0 +1,50 @@ +variable environment {} +variable public_key_path {} +variable aws_region {} +variable domain {} +variable force_destroy_s3_buckets {} + +module "key_pair" { + source = "../modules/keypair" + environment = "${var.environment}" + public_key_path = 
"${var.public_key_path}" +} + +module "vpc" { + source = "../modules/vpc" + environment = "${var.environment}" +} + +module "route53" { + source = "../modules/route53" + domain = "${var.domain}" + environment = "${var.environment}" + force_destroy = "${var.force_destroy_s3_buckets}" +} + +module "s3" { + source = "../modules/s3" + domain = "${var.domain}" + environment = "${var.environment}" + force_destroy = "${var.force_destroy_s3_buckets}" +} + +output "main_vpc_id" { + value = "${module.vpc.main_vpc_id}" +} + +output "key_pair_name" { + value = "${module.key_pair.key_pair_name}" +} + +output "dns_nameservers" { + value = "${module.route53.nameservers}" +} + +output "cluster_name" { + value = "${module.route53.cluster_name}" +} + +output "kops_config_bucket" { #TODO: Update + value = "${module.s3.kops_config_bucket}" +} diff --git a/variables.tf b/variables.tf index 6005f5d..1cf5613 100644 --- a/variables.tf +++ b/variables.tf @@ -1,6 +1,5 @@ # General - variable "aws_region" { description = "AWS region to launch servers." default = "us-west-2" @@ -8,7 +7,7 @@ variable "aws_region" { } variable "environment" { - description = "Name given to the enviroment in which Runnable is being deployed. This can be any name. It is used in multiple places to name resources." + description = "Name given to the environment in which Runnable is being deployed. This can be any name. It is used in multiple places to name resources." default = "runnable-on-prem" type = "string" } @@ -18,25 +17,22 @@ variable "domain" { type = "string" } -# S3 Buckets +# Key Pair -variable "force_destroy_s3_buckets" { - description = "Forces destroy of S3 buckets and deletes all their content. Default to false. Use this only when tearing down an environment. Before running `terraform destroy`, `terraform apply` must be run to updates buckets." +variable "public_key_path" { + description = "Path to public key for key which will be used for sshing into instances through bastion" type = "string" - default = "false" # https://www.terraform.io/docs/configuration/variables.html#booleans } -# Databases +# S3 Buckets -variable "db_username" { - description = "Username for RDS Postgres instance" +variable "force_destroy_s3_buckets" { + description = "Forces destroy of S3 buckets and deletes all their content. Default to false. Use this only when tearing down an environment. Before running `terraform destroy`, `terraform apply` must be run to updates buckets." type = "string" + default = "true" # https://www.terraform.io/docs/configuration/variables.html#booleans } -variable "db_password" { - description = "Password for RDS Postgres instance" - type = "string" -} +# Databases variable "db_port" { description = "Port for RDS Postgres instance" @@ -44,11 +40,6 @@ variable "db_port" { type = "string" } -variable "db_subnet_group_name" { - description = "Subnet in which database will be created" - type = "string" -} - variable "db_instance_class" { description = "Type of instance that will be used for database" type = "string" @@ -57,24 +48,9 @@ variable "db_instance_class" { # EC2 Instances -variable "main_host_vpc_id" { - description = "VPC in which security groups and instance for main host will be created." - type = "string" -} - -variable "main_host_subnet_id" { - description = "Subnet in which main host EC2 instance will be created. 
Subnet must be part of VPC in `main_host_vpc_id`" - type = "string" -} - -variable "bastion_sg_id" { - description = "Security group id for bastion instance" - type = "string" -} - variable "main_host_private_ip" { description = "Private IP address in VPC for main-host. This is important because ip address is encoded in launch configuration for docks." - default = "10.4.0.100" + default = "10.10.1.100" type = "string" } @@ -84,11 +60,6 @@ variable "main_host_instance_type" { default = "m4.2xlarge" } -variable "dock_subnet_id" { - description = "Subnet in which dock EC2 instance will be created. Subnet must be part of VPC in `main_host_vpc_id`" - type = "string" -} - variable "dock_instance_type" { description = "Type of instance that will be used for all docks" type = "string" @@ -104,8 +75,3 @@ variable "lc_user_data_file_location" { description = "Location for file generated for launch configuration. This file needs to have correct IPs, ports, and files" type = "string" } - -variable "key_name" { - description = "Name of ssh key to be used for accessing all instances" - type = "string" -}