# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.

  $front_ip   = "10.0.0.10"
  $couchdb_ip = "10.0.0.11"
  $solr_ip    = "10.0.0.12"

  # Shell snippet run on every box: records the fixed private-network
  # addresses in files on the guest so later provisioning steps can read them.
  $record_ips = <<-FOO
    echo #{$front_ip} > /ip-front
    echo #{$couchdb_ip} > /ip-couchdb
    echo #{$solr_ip} > /ip-solr
  FOO
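
  # Example (hypothetical sketch): any script provisioned onto a guest can
  # read the recorded addresses back, e.g.:
  #   COUCHDB_IP=$(cat /ip-couchdb)
  #   curl "http://${COUCHDB_IP}:5984/"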
config.vm.define "ingestfront", primary: true do |ingestfront|
# Every Vagrant virtual environment requires a box to build off of.
ingestfront.vm.box = "ingest_base"
# Create a private network, which allows host-only access to the machine
# using a specific IP.
ingestfront.vm.network "private_network", ip: $front_ip
ingestfront.vm.provision "shell", inline: $record_ips
ingestfront.vm.provision "ansible" do |ansible|
ansible.playbook = "ansible/provision_ingest_front.yml"
# NOTE: NOT WORKING 2014-10
# ansible.vault_password_file = ENV['HOME']+ '/.ingest_vault_pswd.txt'
ansible.raw_arguments = "--vault-password-file=" + ENV["HOME"] + "/.ingest_vault_pswd.txt"
end
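    # Note on the workaround above: Vagrant documents raw_arguments as an
    # array of strings, so if the single-string form stops working, the
    # array form is the likely fix:
    #   ansible.raw_arguments = ["--vault-password-file=#{ENV['HOME']}/.ingest_vault_pswd.txt"]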
    # ingestfront.vm.synced_folder ".", "/home/vagrant/ingest_deploy", create: true

    ingestfront.vm.provider :virtualbox do |vbox|
      vbox.name = 'ingestfront'
    end
    ingestfront.vm.provider :aws do |aws, override|
      # aws.id = "i-7591c898"
      aws.ami = "ami-b66ed3de"
      aws.instance_type = 't2.medium'
      aws.access_key_id = ENV['AWS_ACCESS_KEY']
      aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY']
      aws.keypair_name = ENV['AWS_KEYPAIR_NAME']
      aws.region = ENV['AWS_DEFAULT_REGION']
      aws.security_groups = ['sg-47c06122', 'sg-fcc06199']
      aws.subnet_id = "subnet-54427312"
      aws.associate_public_ip = true
      aws.tags = {
        'project' => 'ucldc',
        'Name'    => 'dev-ingestfront'
      }

      override.vm.box = "dummy"
      override.vm.synced_folder ".", "/vagrant", disabled: true
      override.ssh.username = ENV['EC2_USER']
      override.ssh.private_key_path = ENV['EC2_PRIVATE_KEY_FILE']
    end
  end
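
  # To bring a machine up on AWS instead of VirtualBox, export the
  # credentials this file expects first (values below are placeholders):
  #   export AWS_ACCESS_KEY=... AWS_SECRET_ACCESS_KEY=...
  #   export AWS_KEYPAIR_NAME=... AWS_DEFAULT_REGION=...
  #   export EC2_USER=ec2-user EC2_PRIVATE_KEY_FILE=~/.ssh/mykey.pem
  #   vagrant up ingestfront --provider=aws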
config.vm.define "couchdb" do |couchdb|
couchdb.vm.box = "ingest_base"
couchdb.vm.network "private_network", ip: $couchdb_ip
couchdb.vm.provision "shell", inline: $record_ips
couchdb.vm.provision "ansible" do |ansible|
ansible.playbook = "ansible/provision_couchdb.yml"
ansible.vault_password_file = ENV['HOME']+ "/.ingest_vault_pswd.txt"
end
# couchdb.vm.synced_folder ".", "/home/vagrant/ingest_deploy", create:true
couchdb.vm.provider :virtualbox do |vbox|
vbox.name = 'couchdb'
end
    couchdb.vm.provider :docker do |docker, override|
      override.vm.box = nil
      docker.image = 'mredar/couchdb'
      docker.cmd = ['/run.sh']
      docker.ports = ['5984:5984']
      docker.volumes = ['/var/lib/couchdb']
      docker.name = 'couchdb'
      # COUCHDB_PASS is assumed to be the variable the image reads for the
      # admin password
      docker.env = { COUCHDB_PASS: ENV['COUCHDB_PASSWORD'] }
    end
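    # With the container running, CouchDB should answer on the mapped port:
    #   curl http://localhost:5984/
    #   # => {"couchdb":"Welcome",...}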
    couchdb.vm.provider :aws do |aws, override|
      aws.ami = "ami-b66ed3de"
      aws.instance_type = 't2.medium'
      aws.access_key_id = ENV['AWS_ACCESS_KEY']
      aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY']
      aws.keypair_name = ENV['AWS_KEYPAIR_NAME']
      aws.region = ENV['AWS_DEFAULT_REGION']
      aws.security_groups = ['sg-47c06122', 'sg-fcc06199']
      aws.subnet_id = "subnet-54427312"
      aws.associate_public_ip = true
      # can't reach the box to provision it if it's in the private subnet
      # aws.subnet_id = "subnet-fddeca89"
      aws.tags = {
        'project' => 'ucldc',
        'Name'    => 'dev-couchdb'
      }

      override.vm.box = "dummy"
      override.vm.synced_folder ".", "/vagrant", disabled: true
      override.ssh.username = ENV['EC2_USER']
      override.ssh.private_key_path = ENV['EC2_PRIVATE_KEY_FILE']
    end
  end
config.vm.define "solr" do |solr|
solr.vm.box = "ingest_base"
solr.vm.network "private_network", ip: $solr_ip
solr.vm.provision "shell", inline: $record_ips
solr.vm.provision "ansible" do |ansible|
ansible.playbook = "ansible/provision_solr.yml"
ansible.vault_password_file = ENV['HOME']+ "/.ingest_vault_pswd.txt"
end
# solr.vm.synced_folder ".", "/home/vagrant/ingest_deploy", create:true
solr.vm.provider :virtualbox do |vbox|
vbox.name = 'solr'
end
    solr.vm.provider :aws do |aws, override|
      aws.ami = "ami-b66ed3de"
      aws.instance_type = 't2.medium'
      aws.access_key_id = ENV['AWS_ACCESS_KEY']
      aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY']
      aws.keypair_name = ENV['AWS_KEYPAIR_NAME']
      aws.region = ENV['AWS_DEFAULT_REGION']
      aws.security_groups = ['sg-47c06122', 'sg-fcc06199']
      aws.subnet_id = "subnet-54427312"
      aws.associate_public_ip = true
      # aws.subnet_id = "subnet-fddeca89"
      aws.tags = {
        'project' => 'ucldc',
        'Name'    => 'dev-solr'
      }

      override.vm.box = "dummy"
      override.vm.synced_folder ".", "/vagrant", disabled: true
      override.ssh.username = ENV['EC2_USER']
      override.ssh.private_key_path = ENV['EC2_PRIVATE_KEY_FILE']
    end
  end
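
  # Quick check once solr is provisioned (assuming the playbook leaves Solr
  # on its default port, 8983):
  #   curl http://10.0.0.12:8983/solr/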
config.vm.define "worker" do |worker|
worker.vm.box = "ingest_base"
worker.vm.network "private_network", type: "dhcp"
worker.vm.provision "shell", inline: $record_ips
worker.vm.provision "ansible" do |ansible|
ansible.playbook = "ansible/provision_worker.yml"
ansible.vault_password_file = ENV['HOME']+ "/.ingest_vault_pswd.txt"
end
# worker.vm.synced_folder ".", "/home/vagrant/ingest_deploy", create:true
worker.vm.provider :virtualbox do |vbox|
vbox.name = 'worker'
end
    worker.vm.provider :aws do |aws, override|
      aws.ami = "ami-b66ed3de"
      aws.instance_type = 't2.medium'
      aws.access_key_id = ENV['AWS_ACCESS_KEY']
      aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY']
      aws.keypair_name = ENV['AWS_KEYPAIR_NAME']
      aws.region = ENV['AWS_DEFAULT_REGION']
      aws.security_groups = ['sg-47c06122', 'sg-fcc06199']
      aws.subnet_id = "subnet-54427312"
      aws.associate_public_ip = true
      # aws.subnet_id = "subnet-fddeca89"
      aws.tags = {
        'project' => 'ucldc',
        'Name'    => 'dev-worker'
      }

      override.vm.box = "dummy"
      override.vm.synced_folder ".", "/vagrant", disabled: true
      override.ssh.username = ENV['EC2_USER']
      override.ssh.private_key_path = ENV['EC2_PRIVATE_KEY_FILE']
      override.ssh.pty = true
    end
  end
  # TODO: a separate box for rqworker & akara?
  # The worker needs to know the solr IP, couchdb IP, and ingest front IP.
  # How should those be communicated across the ansible playbooks?
  # For now, handled the way it currently is on ingestfront.
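  # One possible approach (untested sketch, not what the playbooks do today):
  # hand the addresses to each play as extra vars via the ansible provisioner,
  # e.g. for the worker:
  #
  # worker.vm.provision "ansible" do |ansible|
  #   ansible.playbook = "ansible/provision_worker.yml"
  #   ansible.extra_vars = {
  #     front_ip:   $front_ip,
  #     couchdb_ip: $couchdb_ip,
  #     solr_ip:    $solr_ip
  #   }
  # end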
  # Disable automatic box update checking. If you disable this, then
  # boxes will only be checked for updates when the user runs
  # `vagrant box outdated`. This is not recommended.
  # config.vm.box_check_update = false

  # Create a forwarded port mapping which allows access to a specific port
  # within the machine from a port on the host machine. In the example below,
  # accessing "localhost:8080" will access port 80 on the guest machine.
  # config.vm.network "forwarded_port", guest: 80, host: 8080

  # Create a private network, which allows host-only access to the machine
  # using a specific IP.
  # config.vm.network "private_network", ip: "10.0.0.10"

  # Create a public network, which generally matches a bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network "public_network"

  # If true, then any SSH connections made will enable agent forwarding.
  # Default value: false
  # config.ssh.forward_agent = true
  # SSH may need either the Vagrant insecure key (VirtualBox boxes) or the
  # EC2 key (AWS); Vagrant tries each listed path in turn.
  config.ssh.private_key_path = [
    "~/.vagrant.d/insecure_private_key",
    ENV['EC2_PRIVATE_KEY_FILE']
  ]
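
  # Typical local workflow (VirtualBox is Vagrant's default provider):
  #   vagrant up ingestfront couchdb solr worker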
  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  # config.vm.synced_folder "../data", "/vagrant_data"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  #
  # config.vm.provider "virtualbox" do |vb|
  #   # Don't boot with headless mode
  #   vb.gui = true
  #
  #   # Use VBoxManage to customize the VM. For example to change memory:
  #   vb.customize ["modifyvm", :id, "--memory", "1024"]
  # end
  #
  # View the documentation for the provider you're using for more
  # information on available options.
  # Enable provisioning with CFEngine. CFEngine Community packages are
  # automatically installed. For example, configure the host as a
  # policy server and optionally a policy file to run:
  #
  # config.vm.provision "cfengine" do |cf|
  #   cf.am_policy_hub = true
  #   # cf.run_file = "motd.cf"
  # end
  #
  # You can also configure and bootstrap a client to an existing
  # policy server:
  #
  # config.vm.provision "cfengine" do |cf|
  #   cf.policy_server_address = "10.0.2.15"
  # end

  # Enable provisioning with Puppet stand alone. Puppet manifests
  # are contained in a directory path relative to this Vagrantfile.
  # You will need to create the manifests directory and a manifest in
  # the file default.pp in the manifests_path directory.
  #
  # config.vm.provision "puppet" do |puppet|
  #   puppet.manifests_path = "manifests"
  #   puppet.manifest_file = "default.pp"
  # end
  # Enable provisioning with chef solo, specifying a cookbooks path, roles
  # path, and data_bags path (all relative to this Vagrantfile), and adding
  # some recipes and/or roles.
  #
  # config.vm.provision "chef_solo" do |chef|
  #   chef.cookbooks_path = "../my-recipes/cookbooks"
  #   chef.roles_path = "../my-recipes/roles"
  #   chef.data_bags_path = "../my-recipes/data_bags"
  #   chef.add_recipe "mysql"
  #   chef.add_role "web"
  #
  #   # You may also specify custom JSON attributes:
  #   chef.json = { mysql_password: "foo" }
  # end

  # Enable provisioning with chef server, specifying the chef server URL,
  # and the path to the validation key (relative to this Vagrantfile).
  #
  # The Opscode Platform uses HTTPS. Substitute your organization for
  # ORGNAME in the URL and validation key.
  #
  # If you have your own Chef Server, use the appropriate URL, which may be
  # HTTP instead of HTTPS depending on your configuration. Also change the
  # validation key to validation.pem.
  #
  # config.vm.provision "chef_client" do |chef|
  #   chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
  #   chef.validation_key_path = "ORGNAME-validator.pem"
  # end
  #
  # If you're using the Opscode platform, your validator client is
  # ORGNAME-validator, replacing ORGNAME with your organization name.
  #
  # If you have your own Chef Server, the default validation client name is
  # chef-validator, unless you changed the configuration.
  #
  #   chef.validation_client_name = "ORGNAME-validator"
end