From 3471998975addbd43fbb67b0033095684d97f500 Mon Sep 17 00:00:00 2001
From: Chris Krenn
Date: Sun, 8 Jan 2023 08:13:08 -0800
Subject: [PATCH 1/5] added static build/deploy to Makefile

---
 Makefile | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/Makefile b/Makefile
index ee14e1d2b..97478e2fa 100644
--- a/Makefile
+++ b/Makefile
@@ -10,6 +10,7 @@ BASEURL ?= https://127.0.0.1.sslip.io
 E2E_RUN = cd e2e; CYPRESS_BASE_URL=$(BASEURL)
 export ENV_FILE = .env
 export TAG = $(shell grep -e ^TAG ${ENV_FILE} | awk -F'[=]' '{gsub(/ /,""); print $$2}')
+export S3_BUCKET = $(shell grep -e ^S3_BUCKET ${ENV_FILE} | awk -F'[=]' '{gsub(/ /,""); print $$2}')
 export GIT_HASH = $(shell git rev-parse --short HEAD)
 
 export COMPOSE_FILE_ARGS = -f docker-compose.yml -f docker-compose.dev.yml
@@ -73,6 +74,17 @@ start-FULL-REBUILD: echo_vars stop rm-ALL ## Remove and restart all Docker conta
 	docker compose ${COMPOSE_FILE_ARGS} --env-file ${ENV_FILE} down
 	docker compose ${COMPOSE_FILE_ARGS} --env-file ${ENV_FILE} up --build
 
+extract-bundles: ## Extract bundles from file-server for cloud deployment
+	/bin/rm -rf build
+	docker cp polis-${TAG}-file-server-1:/app/build/ build
+
+upload-bundles: ## upload bundles to aws s3
+	aws s3 cp build s3://${S3_BUCKET} \
+		--recursive \
+		--metadata-directive REPLACE \
+		--acl public-read \
+		--cache-control max-age=31536000
+
 e2e-install: e2e/node_modules ## Install Cypress E2E testing tools
 	$(E2E_RUN) npm install

From a9fee332f0401ccba1dd79c8edc858ba6940491d Mon Sep 17 00:00:00 2001
From: Bennie Rosas
Date: Fri, 10 Nov 2023 05:05:02 -0600
Subject: [PATCH 2/5] small improvements to asset Makefile targets

---
 .gitignore            |  1 +
 Makefile              | 27 +++++++++++++++------------
 docs/configuration.md |  3 ++-
 example.env           |  2 ++
 4 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/.gitignore b/.gitignore
index 01af81b3a..3d5708e5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 .env
 prod.env
+build/
diff --git a/Makefile b/Makefile
index 97478e2fa..599ca20c0 100644
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,7 @@ BASEURL ?= https://127.0.0.1.sslip.io
 E2E_RUN = cd e2e; CYPRESS_BASE_URL=$(BASEURL)
 export ENV_FILE = .env
 export TAG = $(shell grep -e ^TAG ${ENV_FILE} | awk -F'[=]' '{gsub(/ /,""); print $$2}')
-export S3_BUCKET = $(shell grep -e ^S3_BUCKET ${ENV_FILE} | awk -F'[=]' '{gsub(/ /,""); print $$2}')
+export S3_BUCKET = $(shell awk -F'=' '/^S3_BUCKET/ {gsub(/ /, "", $$2); print $$2}' ${ENV_FILE})
 export GIT_HASH = $(shell git rev-parse --short HEAD)
 
 export COMPOSE_FILE_ARGS = -f docker-compose.yml -f docker-compose.dev.yml
@@ -74,16 +74,19 @@ start-FULL-REBUILD: echo_vars stop rm-ALL ## Remove and restart all Docker conta
 	docker compose ${COMPOSE_FILE_ARGS} --env-file ${ENV_FILE} down
 	docker compose ${COMPOSE_FILE_ARGS} --env-file ${ENV_FILE} up --build
 
-extract-bundles: ## Extract bundles from file-server for cloud deployment
-	/bin/rm -rf build
-	docker cp polis-${TAG}-file-server-1:/app/build/ build
+build-web-assets: ## Build and extract static web assets for cloud deployment
+	docker compose ${COMPOSE_FILE_ARGS} --env-file ${ENV_FILE} create --build --force-recreate file-server
+	$(MAKE) extract-web-assets
 
-upload-bundles: ## upload bundles to aws s3
-	aws s3 cp build s3://${S3_BUCKET} \
-		--recursive \
-		--metadata-directive REPLACE \
-		--acl public-read \
-		--cache-control max-age=31536000
+extract-web-assets: ## Extract static web assets from file-server for cloud deployment
+	/bin/rm -rf build
+	docker compose ${COMPOSE_FILE_ARGS} --env-file ${ENV_FILE} cp file-server:/app/build/ build
+
+upload-web-assets: ## upload static web assets to aws s3
+	aws s3 cp build s3://${S3_BUCKET} \
+		--recursive \
+		--metadata-directive REPLACE \
+		--cache-control max-age=31536000
 
 e2e-install: e2e/node_modules ## Install Cypress E2E testing tools
 	$(E2E_RUN) npm install
@@ -115,8 +118,8 @@ rbs: start-rebuild
 	@true
 
 .PHONY: help pull start stop rm-containers rm-volumes rm-images rm-ALL hash build-no-cache start-rebuild \
-	start-recreate restart-FULL-REBUILD e2e-install e2e-prepare e2e-run-minimal e2e-run-standalone e2e-run-secret \
-	e2e-run-subset e2e-run-all
+	start-recreate restart-FULL-REBUILD e2e-install e2e-prepare e2e-run-minimal e2e-run-standalone e2e-run-secret \
+	e2e-run-subset e2e-run-all build-web-assets extract-web-assets upload-web-assets
 
 help:
 	@echo 'Usage: make '
diff --git a/docs/configuration.md b/docs/configuration.md
index d30c82787..eaa79f86b 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -110,7 +110,7 @@ If you are deploying to a custom domain (not `pol.is`) then you need to update b
 - **`DOMAIN_WHITELIST_ITEM_01`** - **`08`** up to 8 possible additional whitelisted domains for client applications to make API requests from. Typical setups that use the same URL for the API service as for the public-facing web sites do not need to configure these.
 - **`EMBED_SERVICE_HOSTNAME`** should match **`API_DEV_HOSTNAME`** in production, or **`API_DEV_HOSTNAME`** in development. Embedded conversations make API requests to this host.
 - **`SERVICE_URL`** used by client-report to make API calls. Only necessary if client-report is hosted separately from the API service. Can be left blank.
-- **`STATIC_FILES_HOST`** Used by the API service to fetch static assets (the compiled client applications) from a static file server. Within the docker compose setup this is `file-server`, but could be an external hostname, such as a CDN.
+- **`STATIC_FILES_HOST`** Used by the API service to fetch static assets (the compiled client applications) from a static file server. Within the docker compose setup this is `file-server`, but could be an external hostname, such as a CDN or S3 bucket.
 
 ### Third Party API Credentials
 
@@ -125,6 +125,7 @@ If you are deploying to a custom domain (not `pol.is`) then you need to update b
 - **`GOOGLE_CREDS_STRINGIFIED`** Alternative to **`GOOGLE_CREDENTIALS_BASE64`** (See below).
 - **`MAILGUN_API_KEY`**, **`MAILGUN_DOMAIN`** If using Mailgun as an email transport.
 - **`MAXMIND_LICENSEKEY`**, **`MAXMIND_USERID`** If using IP Geolocation service Maxmind.
+- **`S3_BUCKET`** AWS S3 Bucket name. You can optionally use S3 to host static assets.
 - **`TWITTER_CONSUMER_KEY`**, **`TWITTER_CONSUMER_SECRET`** For Twitter integration.
 - **`AWS_ACCESS_KEY_ID`**, **`AWS_SECRET_ACCESS_KEY`** If using Amazon SES as an email transport.
 
diff --git a/example.env b/example.env
index 84dce9675..eb116c020 100644
--- a/example.env
+++ b/example.env
@@ -109,6 +109,8 @@ MAILGUN_API_KEY=
 MAILGUN_DOMAIN=
 MAXMIND_LICENSEKEY=
 MAXMIND_USERID=
+# AWS S3 Bucket name. You can optionally use S3 to host static assets.
+S3_BUCKET=
 TWITTER_CONSUMER_KEY=
 TWITTER_CONSUMER_SECRET=
 # Read from process.env by aws-sdk, if using SES for email transport

From f2235798de1e82724cecbf3697fe5b25898309a5 Mon Sep 17 00:00:00 2001
From: Christopher Small
Date: Tue, 28 Nov 2023 15:11:19 +1000
Subject: [PATCH 3/5] remove upload-web-assets make command

This command does not function as intended, since some assets are
compressed, and the `aws s3` command is not actually designed to do
anything with the `*.headersJson` files which specify the metadata
needed for s3 static site hosting to serve these assets correctly.
---
 Makefile | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/Makefile b/Makefile
index dff18e419..525c999e2 100644
--- a/Makefile
+++ b/Makefile
@@ -81,12 +81,6 @@ extract-web-assets: ## Extract static web assets from file-server for cloud depl
 	/bin/rm -rf build
 	docker compose ${COMPOSE_FILE_ARGS} --env-file ${ENV_FILE} cp file-server:/app/build/ build
 
-upload-web-assets: ## upload static web assets to aws s3
-	aws s3 cp build s3://${S3_BUCKET} \
-		--recursive \
-		--metadata-directive REPLACE \
-		--cache-control max-age=31536000
-
 e2e-install: e2e/node_modules ## Install Cypress E2E testing tools
 	$(E2E_RUN) npm install

From b14105bbd4f9e0d131b4d1aeb66092617a2f434b Mon Sep 17 00:00:00 2001
From: Christopher Small
Date: Tue, 28 Nov 2023 15:13:28 +1000
Subject: [PATCH 4/5] Rewrite the bin/deploy-static-assets to use aws-cli

This gets around authentication issues with IAM Identity Center users,
which require SSO, and aren't well supported by all AWS SDKs.
---
 bin/deploy-static-assets.clj | 109 +++++++++++++----------------------
 1 file changed, 40 insertions(+), 69 deletions(-)

diff --git a/bin/deploy-static-assets.clj b/bin/deploy-static-assets.clj
index 1edadebcb..5b24f7311 100755
--- a/bin/deploy-static-assets.clj
+++ b/bin/deploy-static-assets.clj
@@ -1,23 +1,37 @@
 #!/usr/bin/env bb
-;; To use this script, you will need to install babashka: https://github.com/babashka/babashka#installation
-;; If you have homebrew/linuxbrew installed, you can use:
+;; This script is a utility for deploying static web assets to AWS S3, as an alternative to the `file-server`
+;; container.
 ;;
-;; brew install borkdude/brew/babashka
+;; To use this script, you will need to [install babashka](https://github.com/babashka/babashka#installation)
+;; and the AWS CLI. If you have homebrew/linuxbrew installed, you can accomplish both with:
 ;;
-;; Before deploying, use `make PROD start-rebuild` to get the system running, then from another shell, run
+;; brew install borkdude/brew/babashka awscli
 ;;
-;; docker cp polis-prod-file-server-1:/app/build build
+;; Before deploying, use
 ;;
-;; to copy over all of the static assets from the container to local directory.
-;; Next you will have to make sure that you have the AWS environment variables set.
+;; make build-web-assets
 ;;
-;; Then you should be able to run:
+;; to build and extract the web assets into the `build` directory.
 ;;
-;; ./bin/deploy-static-assets.clj --bucket preprod.pol.is --dist-path build
+;; You may choose to run either with `PROD` settings specified in your `prod.env` file
+;; (`make PROD build-web-assets`), or with custom settings explicitly for deploying web assets
+;; (e.g. a `prod-web-assets.env` file, with `make ENV_FILE=prod-web-assets.env extract-web-assets`).
 ;;
-;; This deploys to the `preprod.pol.is` bucket.
-;; To deploy to the production `pol.is` bucket, use instead `--bucket pol.is`.
+;; Next you will have to make sure that you have the AWS environment variables set to authenticate the AWS
+;; CLI. There are quite a few ways to do this, and we recommend following AWS documentation for this. Possible
+;; routes include using `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables (not
+;; recommended, since non-privileged processes can read these environment variables), setting these values in
+;; your `~/.aws/config` file under a profile (either `default` or a custom profile if you set the
+;; `AWS_PROFILE` environment variable), with a combination of the `~/.aws/config` file and the
+;; `~/.aws/credentials` file, or with `aws sso login` if you are using AWS SSO (a.k.a. IAM Identity Center,
+;; which is the recommended pathway by AWS for organizational human user authentication). This script just
+;; calls out to the `aws` cli, so if it is properly authenticated/authorized and functioning, this script should work.
+;;
+;; Once all this is set up, you should be able to run (e.g.):
+;;
+;; ./bin/deploy-static-assets.clj --bucket my-aws-s3-bucket-name --dist-path build
+
 (require '[babashka.pods :as pods]
          '[babashka.deps :as deps]
          '[clojure.java.io :as io]
          '[clojure.core.async :as async]
          '[babashka.process :as process]
          '[clojure.string :as string]
          '[cheshire.core :as json])
-(pods/load-pod 'org.babashka/aws "0.0.6")
-(deps/add-deps '{:deps {honeysql/honeysql {:mvn/version "1.0.444"}}})
-
-(require '[pod.babashka.aws :as aws]
-         '[pod.babashka.aws.credentials :as aws-creds])
-
-;; Should move this to arg parsing if and when available
-(def region (or (System/getenv "AWS_REGION")
-                "us-east-1"))
-
-(def creds-provider
-  (aws-creds/basic-credentials-provider
-    {:access-key-id (System/getenv "AWS_ACCESS_KEY")
-     :secret-access-key (System/getenv "AWS_SECRET_KEY")}))
-
-(def s3-client
-  "The s3 client for this process"
-  (aws/client {:api :s3 :region region :credentials-provider creds-provider}))
-
-;; list available s3 actions
-;(map first (aws/ops s3-client))
-;; docs for specific action
-;(aws/doc s3-client :ListObjects)
-;(aws/doc s3-client :PutObject)
-
-;; basic listing contents example
-;(aws/invoke s3-client {:op :ListObjects :request {:Bucket "pol.is"}})
-;(->> (:Contents (aws/invoke s3-client {:op :ListObjects :request {:Bucket "preprod.pol.is"}}))
-  ;(map :Key)
-  ;(filter #(re-matches #".*\.headersJson" %)))
-;(->> (:Contents (aws/invoke s3-client {:op :ListObjects :request {:Bucket "preprod.pol.is"}}))
-  ;(filter #(re-matches #".*/fonts/.*" (:Key %))))
 
 (defn file-extension [file]
   (keyword (second (re-find #"\.([a-zA-Z0-9]+)$" (str file)))))
 
 (def cache-buster-seconds 31536000);
 (def cache-buster (format "no-transform,public,max-age=%s,s-maxage=%s" cache-buster-seconds cache-buster-seconds))
 
-;(json/decode (slurp (io/file "build/embed.html.headersJson"))
-  ;(comp keyword #(clojure.string/replace % #"-" "")))
-
 (defn headers-json-data
   [file]
   (let [data (json/decode (slurp file)
                           (comp keyword #(clojure.string/replace % #"-" "")))]
 (defn file-upload-request
   "Returns a put-object request map for the given file"
   [bucket base-path file]
   (let [headers-file (io/file (str file ".headersJson"))]
     (merge
-      {:Bucket bucket
+      {:file file
+       :Bucket bucket
        :Body (io/input-stream (io/file file))
        :Key (relative-path base-path file)
        :ACL "public-read"}
                      (not (re-matches #".*\.headersJson" (str %))))) ;; omit, headersJson, since processed separately
        (map (partial file-upload-request bucket path))))
 
-; Inspect how this parses to AWS S3 requests
-;(pp/pprint
-  ;(mapcat (partial spec-requests "preprod.pol.is") deploy-specs))
-
-;; Check content type mappings
-;(doseq [request
-  ;(mapcat (partial spec-requests "preprod.pol.is") deploy-specs)]
-  ;(println (:Key request) (:ContentType request)))
-
-;; test individual request
-;(spec-requests "preprod.pol.is" (nth deploy-specs 5))
-
-
 ;; synchonous execution
-
 (defn process-deploy
   "Execute AWS S3 request, and return result"
-  [request]
+  [{:as request :keys [Bucket Key ACL ContentType CacheControl ContentEncoding file]}]
   (println "Processing request:" request)
-  [request (aws/invoke s3-client {:op :PutObject :request request})])
-
-;(doseq [request (mapcat (partial spec-requests "preprod.pol.is") deploy-specs)]
-  ;(println "processing request for" (:Key request))
-  ;(let [response (aws/invoke s3-client {:op :PutObject :request request})]
-    ;(println response))))
+  [request
+   (process/sh "aws" "s3" "cp"
+               ;"--metadata" (json/encode (dissoc request :file :Bucket :Body :Key))
+               ;"--acl" ACL
+               "--content-type" ContentType
+               "--content-encoding" ContentEncoding
+               "--metadata-directive" "REPLACE"
+               (str file)
+               (str "s3://" Bucket "/" Key))])
 
 ;; process the aws requests asynchronously with parallelism 12
 (def concurrent-requests 12)
 
 (defn responses
   [bucket path]
   (let [requests (upload-requests bucket path)
         output-chan (async/chan concurrent-requests)]
+    ;; pipeline pushes the request objects through the (map process-deploy) transducer in parallel, and
+    ;; collects results in the output chan
     (async/pipeline-blocking concurrent-requests
                              output-chan
                              (map process-deploy)
                              (async/

Date: Tue, 28 Nov 2023 16:33:19 +1000
Subject: [PATCH 5/5] remove S3_BUCKET from make config

This isn't needed, since we're reverting back to the old script for
publishing to s3
---
 Makefile              | 1 -
 docs/configuration.md | 1 -
 example.env           | 2 --
 3 files changed, 4 deletions(-)

diff --git a/Makefile b/Makefile
index 525c999e2..09805f304 100644
--- a/Makefile
+++ b/Makefile
@@ -9,7 +9,6 @@ E2E_RUN = cd e2e;
 export ENV_FILE = .env
 export TAG = $(shell grep -e ^TAG ${ENV_FILE} | awk -F'[=]' '{gsub(/ /,""); print $$2}')
-export S3_BUCKET = $(shell awk -F'=' '/^S3_BUCKET/ {gsub(/ /, "", $$2); print $$2}' ${ENV_FILE})
 export GIT_HASH = $(shell git rev-parse --short HEAD)
 
 export COMPOSE_FILE_ARGS = -f docker-compose.yml -f docker-compose.dev.yml
diff --git a/docs/configuration.md b/docs/configuration.md
index eaa79f86b..8fa6c5bc3 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -125,7 +125,6 @@ If you are deploying to a custom domain (not `pol.is`) then you need to update b
 - **`GOOGLE_CREDS_STRINGIFIED`** Alternative to **`GOOGLE_CREDENTIALS_BASE64`** (See below).
 - **`MAILGUN_API_KEY`**, **`MAILGUN_DOMAIN`** If using Mailgun as an email transport.
 - **`MAXMIND_LICENSEKEY`**, **`MAXMIND_USERID`** If using IP Geolocation service Maxmind.
-- **`S3_BUCKET`** AWS S3 Bucket name. You can optionally use S3 to host static assets.
 - **`TWITTER_CONSUMER_KEY`**, **`TWITTER_CONSUMER_SECRET`** For Twitter integration.
 - **`AWS_ACCESS_KEY_ID`**, **`AWS_SECRET_ACCESS_KEY`** If using Amazon SES as an email transport.
 
diff --git a/example.env b/example.env
index a491ca296..6b1cbf80b 100644
--- a/example.env
+++ b/example.env
@@ -109,8 +109,6 @@ MAILGUN_API_KEY=
 MAILGUN_DOMAIN=
 MAXMIND_LICENSEKEY=
 MAXMIND_USERID=
-# AWS S3 Bucket name. You can optionally use S3 to host static assets.
-S3_BUCKET=
 TWITTER_CONSUMER_KEY=
 TWITTER_CONSUMER_SECRET=
 # Read from process.env by aws-sdk, if using SES for email transport