-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtasks.py
241 lines (204 loc) · 7.75 KB
/
tasks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
import os
import json
import time
import boto3
import shutil
from invoke import task, Exit
from os import getenv as env
from dotenv import load_dotenv
from os.path import join, dirname, exists
from random import choice
# Load configuration from a `.env` file sitting next to this tasks.py.
load_dotenv(join(dirname(__file__), '.env'))
# If AWS_DEFAULT_PROFILE is set, make it the default boto3 session profile.
# NOTE(review): the CLI commands below key off AWS_PROFILE instead — the two
# variables are not unified; confirm which one deployments actually rely on.
aws_profile = env('AWS_DEFAULT_PROFILE')
if aws_profile is not None:
    boto3.setup_default_session(profile_name=aws_profile)
def getenv(var, required=True):
    """Look up environment variable *var*.

    Raises invoke.Exit when *required* is true and the variable is unset;
    otherwise returns the value (possibly None).
    """
    value = env(var)
    if required and value is None:
        raise Exit("{} not defined".format(var))
    return value
def profile_arg():
    """Return an ``--profile <name>`` fragment for AWS CLI commands.

    Empty string when AWS_PROFILE is not set, so the fragment can be
    interpolated unconditionally.
    """
    name = getenv("AWS_PROFILE", False)
    return "" if name is None else "--profile {}".format(name)
def existing_stack(ctx):
    """Describe the configured CloudFormation stack.

    Returns the parsed ``Stacks[0]`` dict, or None (implicitly) when the
    describe call fails, i.e. the stack does not exist.
    """
    describe = (
        "aws {} cloudformation describe-stacks --stack-name {} "
        "--query 'Stacks[0]'"
    ).format(profile_arg(), getenv('STACK_NAME'))
    result = ctx.run(describe, hide=True, warn=True, echo=False)
    if result.exited != 0:
        return None
    return json.loads(result.stdout)
def s3_zipfile_exists(ctx):
    """Return True if the packaged lambda zip is already in the code bucket."""
    listing = "aws {} s3 ls s3://{}/dce-transcript-indexer/{}-function.zip".format(
        profile_arg(),
        getenv('LAMBDA_CODE_BUCKET'),
        getenv('STACK_NAME'),
    )
    outcome = ctx.run(listing, hide=True, warn=True, echo=False)
    # `s3 ls` exits non-zero when the key is absent.
    return outcome.exited == 0
def find_cidr_base(ctx):
    """Pick a random unused 10.1.x.0/24 CIDR block for a new VPC.

    Collects every CIDR already associated with a VPC in the account,
    then chooses a 10.1.<x> base (x in 0..253) that is not taken.
    """
    query = (
        "aws {} ec2 describe-vpcs "
        "--query 'Vpcs[].CidrBlockAssociationSet[].CidrBlock'"
    ).format(profile_arg())
    output = ctx.run(query, hide=True, echo=False)
    # Reduce "a.b.c.d/nn" entries to their "a.b.c" base for comparison.
    in_use = {cidr[:cidr.rindex('.')] for cidr in json.loads(output.stdout)}
    candidates = {"10.1.{}".format(n) for n in range(254)}
    return choice(list(candidates - in_use)) + ".0/24"
@task
def package(ctx):
    """
    Package the function + dependencies into a zipfile and upload to s3 bucket created via `create-code-bucket`
    """
    project_root = dirname(__file__)

    # Start from a clean build directory.
    build_path = join(project_root, 'dist')
    shutil.rmtree(build_path, ignore_errors=True)
    os.makedirs(build_path)

    print("installing dependencies to build path")
    req_file = join(project_root, 'function_requirements.txt')
    ctx.run("pip install -U -r {} -t {}".format(req_file, build_path))

    # Symlink the handler and index template into the build dir so the
    # zip step below picks them up alongside the dependencies.
    for asset in ['function.py', 'index_template.json']:
        ctx.run("ln -s -f {} {}".format(join(project_root, asset), build_path))

    print("packaging zip file")
    zip_path = join(project_root, 'function.zip')
    with ctx.cd(build_path):
        ctx.run("zip -r {} .".format(zip_path))

    print("uploading to s3")
    s3_file_name = "{}-function.zip".format(getenv('STACK_NAME'))
    ctx.run(
        "aws {} s3 cp {} s3://{}/dce-transcript-indexer/{}".format(
            profile_arg(),
            zip_path,
            getenv("LAMBDA_CODE_BUCKET"),
            s3_file_name,
        ),
        echo=True,
    )
@task
def update_function(ctx):
    """
    Update the function code with the latest packaged zipfile in s3. Note: this will publish a new Lambda version.
    """
    # Re-package and upload first so we always push the freshest code.
    package(ctx)
    s3_key = "dce-transcript-indexer/{}-function.zip".format(getenv('STACK_NAME'))
    update_cmd = (
        "aws {} lambda update-function-code "
        "--function-name {}-function --publish --s3-bucket {} "
        "--s3-key {}"
    ).format(
        profile_arg(),
        getenv('STACK_NAME'),
        getenv('LAMBDA_CODE_BUCKET'),
        s3_key,
    )
    ctx.run(update_cmd)
@task
def deploy(ctx):
    """
    Create or update the CloudFormation stack. Note: you must run `package` first.
    """
    template_path = join(dirname(__file__), 'template.yml')

    # Fail early if the packaged lambda zip isn't in s3 yet.
    if not s3_zipfile_exists(ctx):
        print("No zipfile found in s3!")
        print("Did you run the `package` command?")
        raise Exit(1)

    current_stack = existing_stack(ctx)
    change_set_name = None
    if current_stack is None:
        # Fresh deployment: create the stack directly with a new CIDR block.
        operation = "create-stack"
        cidr_block = find_cidr_base(ctx)
    else:
        # Update path: stage a change set, reusing the stack's existing CIDR
        # so the VPC isn't re-addressed on every update.
        operation = "create-change-set"
        change_set_name = "change-set-{}".format(int(time.time()))
        try:
            cidr_block = next(
                x["OutputValue"] for x in current_stack["Outputs"]
                if x["OutputKey"] == "VpcCidrBlock"
            )
        except StopIteration:
            print("Existing stack doesn't have a cidr block?!?!")
            raise Exit(1)

    cmd = ("aws {} cloudformation {} "
           "--stack-name {} "
           "--capabilities CAPABILITY_NAMED_IAM "
           "--template-body file://{} "
           "--tags Key=Project,Value=MH Key=OU,Value=DE Key=TranscriptIndexer,Value=1 "
           "--parameters "
           "ParameterKey=CidrBlock,ParameterValue='{}' "
           "ParameterKey=LambdaCodeBucket,ParameterValue='{}' "
           "ParameterKey=NotificationEmail,ParameterValue='{}' "
           "ParameterKey=ElasticsearchInstanceType,ParameterValue='{}' "
           "ParameterKey=LambdaTimeout,ParameterValue='{}' "
           "ParameterKey=LambdaMemory,ParameterValue='{}'"
           ).format(
        profile_arg(),
        operation,
        getenv("STACK_NAME"),
        template_path,
        cidr_block,
        getenv('LAMBDA_CODE_BUCKET'),
        getenv('NOTIFICATION_EMAIL'),
        getenv('ES_INSTANCE_TYPE'),
        getenv('LAMBDA_TIMEOUT'),
        getenv('LAMBDA_MEMORY'),
    )
    # BUG FIX: --change-set-name is only valid for create-change-set; it was
    # previously appended unconditionally, which (a) passed an invalid flag to
    # create-stack and (b) raised NameError on fresh deploys because
    # change_set_name was never assigned on that path.
    if change_set_name is not None:
        cmd += " --change-set-name {}".format(change_set_name)

    res = ctx.run(cmd, warn=True, echo=True)
    if res.exited != 0 and "No updates" in res.stderr:
        print("Stack is up-to-date!")
        return
    elif res.exited != 0:
        raise Exit(res.stderr)

    if current_stack is None:
        wait_for = "stack-create-complete"
        wait_for_name = "--stack-name {}".format(getenv('STACK_NAME'))
    else:
        wait_for = "change-set-create-complete"
        # BUG FIX: the format placeholder was missing ("--change-set-name "
        # with no value). Also pass --stack-name so the CLI can resolve the
        # change set by name rather than requiring a full ARN.
        wait_for_name = "--change-set-name {} --stack-name {}".format(
            change_set_name, getenv('STACK_NAME'))

    print("Waiting for deployment/update to complete...")
    cmd = ("aws {} cloudformation wait {} "
           "{}").format(profile_arg(), wait_for, wait_for_name)
    ctx.run(cmd)
    print("Done")
@task
def delete(ctx):
    """
    Delete the CloudFormation stack
    """
    cmd = ("aws {} cloudformation delete-stack "
           "--stack-name {}").format(profile_arg(), getenv('STACK_NAME'))
    # Destructive operation: require interactive confirmation.
    if input('are you sure? [y/N] ').lower().strip().startswith('y'):
        ctx.run(cmd)
        print("Waiting for deletion to complete...")
        # BUG FIX: this wait command hard-coded "--profile test"; use the
        # configured profile (if any) like every other command in this file.
        cmd = ("aws {} cloudformation wait stack-delete-complete "
               "--stack-name {}").format(profile_arg(), getenv('STACK_NAME'))
        ctx.run(cmd)
        print("Done")
    else:
        print("not deleting stack")
@task
def init_index_template(ctx):
    """
    Populate the Elasticsearch index template. Only has to happen once after initial stack creation.
    """
    payload = '{ "init_index_template": true }'
    # Synchronous invoke; output is discarded (/dev/null), we only care
    # about the side effect of installing the index template.
    invoke_cmd = (
        "aws {} lambda invoke --function-name {}-function "
        "--invocation-type RequestResponse --log-type None "
        "--payload '{}' /dev/null"
    ).format(profile_arg(), getenv("STACK_NAME"), payload)
    ctx.run(invoke_cmd)
@task
def ssh_tunnel(ctx, opsworks_stack):
    """
    Outputs an ssh command to establish a tunnel to the Elasticsearch instance.

    :param opsworks_stack: value of the `opsworks:stack` tag identifying the
        cluster whose "admin1" node will serve as the tunnel host.
    """
    # Find the public IP of the opsworks "admin1" instance to tunnel through.
    cmd = ("aws {} ec2 describe-instances --output text "
           "--filters \"Name=tag:opsworks:stack,Values={}\" "
           "--query \"Reservations[].Instances[?Tags[?Key=='opsworks:instance' && contains(Value, 'admin1')]].PublicIpAddress\" "
           ).format(profile_arg(), opsworks_stack)
    instance_ip = ctx.run(cmd, hide=True).stdout.strip()
    # get ES endpoint
    # FIX (consistency): use getenv() like the rest of the file rather than the
    # raw env() alias, so a missing STACK_NAME fails fast with a clear message
    # instead of querying a stack literally named "None".
    cmd = ("aws {} cloudformation describe-stacks --stack-name {} "
           "--query \"Stacks[].Outputs[?OutputKey=='DomainEndpoint'].OutputValue\" "
           "--output text"
           ).format(profile_arg(), getenv('STACK_NAME'))
    es_endpoint = ctx.run(cmd, hide=True).stdout.strip()
    print("ssh -N -f -L 9200:{}:443 {}".format(es_endpoint, instance_ip))