# deploy-rays-dashboard-production.yaml
# Production deploy for the Rays Dashboard app: builds the Next.js app,
# packages it into a Docker image, pushes to ECR, rolls the ECS service,
# and invalidates CloudFront. Manual trigger only.
name: Deploy Rays Dashboard Production
on:
  workflow_dispatch:
jobs:
  build:
    name: Build and deploy Rays Dashboard
    runs-on: ubuntu-latest
    environment: production
    env:
      AWS_REGION: us-east-1
      ENVIRONMENT_TAG: prod
      SERVICE_NAME: summer-fi-rays-prod
      CLUSTER_NAME: summer-fi-rays-prod
      CONFIG_URL: ${{ secrets.CONFIG_URL }}
      CONFIG_URL_RAYS: ${{ secrets.CONFIG_URL_RAYS }}
      FUNCTIONS_API_URL: ${{ secrets.FUNCTIONS_API_URL }}
      BORROW_DB_READ_CONNECTION_STRING: ${{ secrets.BORROW_DB_READ_CONNECTION_STRING }}
      CONTENTFUL_ACCESS_TOKEN: ${{ secrets.CONTENTFUL_ACCESS_TOKEN }}
      CONTENTFUL_PREVIEW_ACCESS_TOKEN: ${{ secrets.CONTENTFUL_PREVIEW_ACCESS_TOKEN }}
      CONTENTFUL_SPACE_ID: ${{ secrets.CONTENTFUL_SPACE_ID }}
      MIXPANEL_KEY: ${{ secrets.MIXPANEL_KEY }}
      NEXT_PUBLIC_MIXPANEL_KEY: ${{ secrets.NEXT_PUBLIC_MIXPANEL_KEY }}
      SUBGRAPH_BASE: ${{ secrets.SUBGRAPH_BASE }}
      NEXT_TELEMETRY_DISABLED: ${{ secrets.NEXT_TELEMETRY_DISABLED }}
    steps:
      - name: Check out code
        uses: actions/checkout@v3
        with:
          # Depth 2 so `git rev-parse HEAD` works while keeping the clone small.
          fetch-depth: 2
      - name: Set up turbo cache
        uses: rharkor/caching-for-turbo@v1.5
      - uses: pnpm/action-setup@v2.0.1
        with:
          version: 8.14.1
      - name: Setup Node.js environment
        uses: actions/setup-node@v3
        with:
          node-version: 20
          cache: 'pnpm'
      - name: Setup Rays Dashboard App Next.js Cache
        uses: actions/cache@v4
        with:
          path: ${{ github.workspace }}/apps/rays-dashboard/.next/cache
          # Key on both the lockfile and app sources; restore falls back to the
          # lockfile-only prefix so a source-only change still reuses the cache.
          key: ${{ runner.os }}-rays-dashboard-app-${{ hashFiles('pnpm-lock.yaml') }}-${{ hashFiles('apps/rays-dashboard/**/*.ts', 'apps/rays-dashboard/**/*.tsx') }}
          restore-keys: ${{ runner.os }}-rays-dashboard-app-${{ hashFiles('pnpm-lock.yaml') }}-
      - name: Establish VPN connection
        run: |
          sudo apt update
          sudo apt install -y openvpn openvpn-systemd-resolved
          echo 'Configuring the VPN...'
          echo "${{ secrets.VPN_CONFIG }}" > vpn-config.ovpn
          echo "${{ secrets.VPN_USERNAME }}" > vpn-credentials.txt
          echo "${{ secrets.VPN_PASSWORD }}" >> vpn-credentials.txt
          # Restrict the credentials file before handing it to openvpn.
          chmod 600 vpn-credentials.txt
          echo 'Connecting to the VPN...'
          sudo openvpn --config vpn-config.ovpn --auth-user-pass vpn-credentials.txt --daemon
          # Give the daemonized client a moment to bring the tunnel up.
          sleep 5
      - name: Check VPN connection
        env:
          BORROW_DB_READ_DB: ${{ secrets.BORROW_DB_READ_DB }}
          BORROW_DB_READ_HOST: ${{ secrets.BORROW_DB_READ_HOST }}
          BORROW_DB_READ_USER: ${{ secrets.BORROW_DB_READ_USER }}
          BORROW_DB_READ_PASSWORD: ${{ secrets.BORROW_DB_READ_PASSWORD }}
          # Fail fast if the VPN route to the DB host is not up.
          PGCONNECT_TIMEOUT: 5
        run: |
          echo 'Checking the VPN connection...'
          # NOTE(review): starting the *local* postgresql service should not be
          # required for a psql client check against a remote host — confirm
          # whether this line can be removed.
          sudo systemctl start postgresql.service
          # Runner shells run with `bash -e`, so a failing psql would abort the
          # step before any status-code check. Test the command inline so the
          # diagnostic message is actually printed on failure.
          if ! PGPASSWORD=$BORROW_DB_READ_PASSWORD /usr/bin/psql -d $BORROW_DB_READ_DB -U $BORROW_DB_READ_USER -h $BORROW_DB_READ_HOST -c 'SELECT 1;' > /dev/null; then
            echo 'VPN connection failed'
            exit 1
          fi
          echo 'VPN connected!'
      - name: Extract commit hash
        id: vars
        shell: bash
        run: |
          # `::set-output` is deprecated; write to $GITHUB_OUTPUT instead.
          echo "sha_short=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT"
      - name: Install dependencies
        run: pnpm install
      - name: Prebuild
        run: pnpm prebuild
      - name: Build
        run: pnpm build-rays-frontend
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_PROD }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PROD }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v1
      - name: Build docker image, copy build output and push to ECR
        id: build-image
        env:
          LATEST_TAG: latest
          ECR_REPO_NAME: summer-fi-rays-prod
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          SHA_TAG: ${{ steps.vars.outputs.sha_short }}
        run: |
          # Build a docker container and
          # push it to ECR so that it can
          # be deployed to ECS.
          # NOTE(review): secrets passed via --build-arg are persisted in the
          # image's build history — consider docker `--secret` mounts instead.
          # Quote each value so secrets containing shell metacharacters do not
          # break the command line.
          docker build -f apps/rays-dashboard/docker/Dockerfile \
            --build-arg BORROW_DB_READ_CONNECTION_STRING="${{ secrets.BORROW_DB_READ_CONNECTION_STRING }}" \
            --build-arg CONTENTFUL_SPACE_ID="${{ secrets.CONTENTFUL_SPACE_ID }}" \
            --build-arg CONTENTFUL_ACCESS_TOKEN="${{ secrets.CONTENTFUL_ACCESS_TOKEN }}" \
            --build-arg CONTENTFUL_PREVIEW_ACCESS_TOKEN="${{ secrets.CONTENTFUL_PREVIEW_ACCESS_TOKEN }}" \
            --build-arg CONFIG_URL="${{ secrets.CONFIG_URL }}" \
            --build-arg CONFIG_URL_RAYS="${{ secrets.CONFIG_URL_RAYS }}" \
            --build-arg FUNCTIONS_API_URL="${{ secrets.FUNCTIONS_API_URL }}" \
            --build-arg MIXPANEL_KEY="${{ secrets.MIXPANEL_KEY }}" \
            --build-arg NEXT_PUBLIC_MIXPANEL_KEY="${{ secrets.NEXT_PUBLIC_MIXPANEL_KEY }}" \
            --build-arg NEXT_TELEMETRY_DISABLED="${{ secrets.NEXT_TELEMETRY_DISABLED }}" \
            --cache-from=$ECR_REGISTRY/$ECR_REPO_NAME:$LATEST_TAG \
            -t $ECR_REGISTRY/$ECR_REPO_NAME:$SHA_TAG \
            -t $ECR_REGISTRY/$ECR_REPO_NAME:$LATEST_TAG \
            -t $ECR_REGISTRY/$ECR_REPO_NAME:$ENVIRONMENT_TAG \
            ./apps/rays-dashboard
          docker push $ECR_REGISTRY/$ECR_REPO_NAME --all-tags
      - name: Update ECS service with latest Docker image
        id: service-update
        run: |
          aws ecs update-service --cluster $CLUSTER_NAME --service ${{ env.SERVICE_NAME }} --force-new-deployment --region $AWS_REGION
      - name: Wait for all services to become stable
        uses: oryanmoshe/ecs-wait-action@v1.3
        with:
          ecs-cluster: ${{ env.CLUSTER_NAME }}
          ecs-services: '["${{ env.SERVICE_NAME }}"]'
      - name: Invalidate CloudFront
        env:
          CF_DIST_ID: ${{ secrets.CF_DIST_ID }}
        run: |
          AWS_MAX_ATTEMPTS=10 aws cloudfront create-invalidation --distribution-id ${{ env.CF_DIST_ID }} --paths "/*"