## Will default to PIPELINE_COMPOSE_PROJECT in options.sh if not set
PIPELINE_NAME=
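# Illustrative example only - any short project name works here ("pipeline-a" is a hypothetical value, not a default):
# PIPELINE_NAME=pipeline-a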
## Set to "development" to seed local development data rather than production data, even when NODE_ENV is "production".
## This is rarely used.
PIPELINE_SEED_ENV=production
## Can be used to mark the pipeline as inactive
PIPELINE_IS_ACTIVE=1
## Path for logs from all services
PIPELINE_LOG_PATH=
## Core Services - primarily overrides for the exposed ports when running multiple systems on one machine.
PIPELINE_DATABASE_PORT=
PIPELINE_MESSAGE_API_PORT=
PIPELINE_MESSAGE_UI_PORT=
PIPELINE_INFLUX_PORT=
PIPELINE_GRAFANA_PORT=
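# Hypothetical example for a second system on the same machine (port numbers below are illustrative, not service defaults):
# PIPELINE_DATABASE_PORT=5532
# PIPELINE_MESSAGE_API_PORT=5772
# PIPELINE_MESSAGE_UI_PORT=15772
# PIPELINE_INFLUX_PORT=8186
# PIPELINE_GRAFANA_PORT=3100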
# Optional overrides for the default passwords/names of the standard services.
PIPELINE_AUTH_PASS=
PIPELINE_DATABASE_PASS=
MESSAGE_QUEUE_COOKIE=
GRAFANA_ORG_NAME=
## Coordinator (api, scheduler, front-end client) - also override exposed ports to avoid conflicts or to run multiple systems.
PIPELINE_API_PORT=6001
PIPELINE_SCHEDULER_PORT=6002
PIPELINE_API_CLIENT_PORT=6101
# This must be the hostname of the machine the pipeline api container will run on.
PIPELINE_THUMBS_HOST=
PIPELINE_THUMBS_PORT=
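# Hypothetical example - the host running the pipeline api container (hostname and port are illustrative):
# PIPELINE_THUMBS_HOST=pipeline-server-1
# PIPELINE_THUMBS_PORT=6103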
## Mount external volumes - these usually contain the task repositories.
EXTERNAL_DATA_VOLUME_1=
CONTAINER_DATA_VOLUME_1=
EXTERNAL_DATA_VOLUME_2=
CONTAINER_DATA_VOLUME_2=
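# Hypothetical example, assuming each EXTERNAL_/CONTAINER_ pair maps a host path to a mount point inside the
# containers (both paths are illustrative):
# EXTERNAL_DATA_VOLUME_1=/data/task-repositories
# CONTAINER_DATA_VOLUME_1=/data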
##
## This section assumes a specific configuration of four workers. For a system with four workers, the host values
## can simply be set as appropriate. To run with more or fewer workers, change the number of entries here
## and the corresponding section(s) in docker-compose.workers.yml. A commented example follows the worker values below.
##
## Worker values
# Same on all machines (assumes one worker per machine). This value must match the value used in the
# pipeline-worker-api options.sh file on each worker machine.
PIPELINE_WORKER_API_PORT=
# Enables the scheduler (with the port value above) to connect and distribute tasks to workers.
PIPELINE_WORKER1_HOST=
PIPELINE_WORKER2_HOST=
PIPELINE_WORKER3_HOST=
PIPELINE_WORKER4_HOST=
# Unique port for each worker client front-end.
PIPELINE_WORKER1_CLIENT_PORT=
PIPELINE_WORKER2_CLIENT_PORT=
PIPELINE_WORKER3_CLIENT_PORT=
PIPELINE_WORKER4_CLIENT_PORT=
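# Hypothetical example for the default four-worker layout (hostnames and ports are illustrative; the api port must
# match the value in each worker machine's pipeline-worker-api options.sh):
# PIPELINE_WORKER_API_PORT=6201
# PIPELINE_WORKER1_HOST=worker-1
# PIPELINE_WORKER2_HOST=worker-2
# PIPELINE_WORKER3_HOST=worker-3
# PIPELINE_WORKER4_HOST=worker-4
# PIPELINE_WORKER1_CLIENT_PORT=6301
# PIPELINE_WORKER2_CLIENT_PORT=6302
# PIPELINE_WORKER3_CLIENT_PORT=6303
# PIPELINE_WORKER4_CLIENT_PORT=6304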
# Generally only required for utility functions running outside standard container operations. Unless running a custom
# configuration, this can be left at the default.
PIPELINE_CORE_SERVICES_HOST=pipeline-db
# Environment for the seed script. If not seeding with Mouse Light Project contents, these can be ignored.
PIPELINE_SEED_TASK_ROOT=/groups/mousebrainmicro/mousebrainmicro/pipeline-systems/pipeline-a/
PIPELINE_SEED_APP_ROOT=/groups/mousebrainmicro/mousebrainmicro/pipeline-systems/pipeline-a/apps
PIPELINE_SEED_TOOLS_ROOT=/groups/mousebrainmicro/mousebrainmicro/pipeline-systems/tools
PIPELINE_SEED_DATA_ROOT=/groups/mousebrainmicro/mousebrainmicro/data
PIPELINE_SEED_OUTPUT_ROOT=/nrs/mouselight/pipeline-testing
PIPELINE_SEED_USER_SCRATCH=/groups/mousebrainmicro/home